diff --git a/kdotpy-v1.0.0/.gitlab-ci.yml b/kdotpy-v1.0.0/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..253e6d63d502718d1e19aa4c9038e949477ab2fe
--- /dev/null
+++ b/kdotpy-v1.0.0/.gitlab-ci.yml
@@ -0,0 +1,98 @@
+# Test: GitLab's software security default image. Only use analyzers valid for python
+include:
+  - template: Security/SAST.gitlab-ci.yml
+sast:  # redefine base template for all SAST jobs:
+  interruptible: true  # enable auto-cancel on new commits
+  variables:
+    SAST_DEFAULT_ANALYZERS: "bandit, semgrep"  # only use these two. NOTE(review): SAST_DEFAULT_ANALYZERS is deprecated on recent GitLab versions — confirm, and consider SAST_EXCLUDED_ANALYZERS instead
+
+# Test: Pytest
+pytest:
+  stage: test
+  tags:
+    - avx  # only use GitLab Runners with avx tag (much faster!)
+  interruptible: true  # enable auto-cancel on new commits
+  # Official language image. Look for the different tagged releases at:
+  # https://hub.docker.com/r/library/python/tags/
+  image: python:3
+  # Change pip's cache directory to be inside the project directory since we can
+  # only cache local items.
+  variables:
+    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
+
+  # Pip's cache doesn't store the python packages
+  # https://pip.pypa.io/en/stable/reference/pip_install/#caching
+  #
+  # If you want to also cache the installed packages, you have to install
+  # them in a virtualenv and cache it as well.
+  cache:
+    paths:
+      - .cache/pip
+      - venv/
+
+  before_script:
+    - python -V  # Print out python version for debugging
+    - pip install virtualenv
+    - virtualenv venv
+    - source venv/bin/activate
+    - pip install .
+    - pip freeze  # Print installed packages for debugging
+
+  script:
+    - pip install pytest pytest-cov
+    - pytest --junitxml=report.xml --cov=src/kdotpy --cov-report=xml  # Run pytest and generate reports. Need to adjust directories!
+
+  after_script:
+    - ls
+
+  artifacts:
+    when: always
+    reports:
+      junit: report.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
+    expire_in: 30 days
+  allow_failure:
+    exit_codes: 5
+
+# Test: Flake8
+flake8:
+  extends: pytest
+  tags: []  # override: clear the inherited 'avx' tag (a bare 'tags:' parses as null, which GitLab CI lint rejects)
+  script:
+    - pip install flake8 flake8-junit-report
+    - flake8 --extend-exclude data/,venv/,*.sh,tests/,*.txt --max-line-length=120 --extend-ignore=E101,E201,E202,E221,E222,E231,E251,E266,E3,E402,E501,E722,E741,W191,W391 --output-file flake8.txt || FLAKE8_EXIT=$?
+    - flake8_junit flake8.txt flake8_junit.xml  # always convert, so the JUnit artifact also exists when flake8 passes
+    - exit ${FLAKE8_EXIT:-0}  # propagate flake8's exit status so allow_failure:false actually fails the job on violations
+  artifacts:
+    when: always
+    reports:
+      junit: flake8_junit.xml
+    expire_in: 30 days
+  allow_failure: false
+
+# Tests: Backwards compatibility with python 3.9
+pytest_py3.9:
+  extends: pytest
+  image: python:3.9
+
+flake8_py3.9:
+  extends: flake8
+  image: python:3.9
+
+# Pages (Doxygen)
+pages:
+  image: alpine
+  before_script:
+    - apk update && apk add doxygen
+  script:
+    - doxygen Doxyfile
+    - mv docs/html/ public/
+  artifacts:
+    paths:
+      # The folder that contains the files to be exposed at the Page URL
+      - public
+  rules:
+    # This ensures that only pushes to the default branch will trigger
+    # a pages deploy
+    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
diff --git a/kdotpy-v1.0.0/CITATION.md b/kdotpy-v1.0.0/CITATION.md
new file mode 100644
index 0000000000000000000000000000000000000000..4c4f5cef5b1c5d9f728602980ae9bfda39efaf1f
--- /dev/null
+++ b/kdotpy-v1.0.0/CITATION.md
@@ -0,0 +1,85 @@
+Citation Policy
+===============
+
+If you use kdotpy, and you benefit from this program, we expect that you cite
+our work in accordance with the rules of good scientific practice.
+
+
+> **NOTE**: The article that accompanies the code is currently being prepared. It
+may change status from "in preparation" to "preprint" and to a full-fledged
+journal publication in the near future. Please take note that the preferred way
+of citing kdotpy will change accordingly.
+
+
+Detailed instructions
+---------------------
+
+- In works in text form, cite the kdotpy project by putting the following
+  citation in your reference list:
+  
+  > W. Beugeling, F. Bayer, C. Berger, J. Böttcher, L. Bovkun, C. Fuchs, 
+  M. Hofer, S. Shamim, M. Siebert, L.-X. Wang, E. M. Hankiewicz, T. Kießling,
+  H. Buhmann, and L. W. Molenkamp,
+  "kdotpy: k·p theory on a lattice for simulating semiconductor band structures",
+  arXiv: 2407.12651 (2024).
+  
+  The citation style may be adjusted so as to conform with the guidelines of the
+  publisher. The author list may be abbreviated as
+  > W. Beugeling et al. (The kdotpy collaboration)
+  
+  Works in text form include journal articles, preprints, theses, conference
+  abstracts, conference proceedings, internal reports, and further work similar
+  to these.
+
+- In works in text form, indicate clearly where kdotpy has been used and which
+  results have been obtained using kdotpy. In the text, place citation
+  references in the appropriate places. For figures, it is recommended to add
+  citation references to the figure captions.
+  
+- In presentations, cite kdotpy prominently at least once. The citation style
+  may be chosen at will, provided that it contains the first author, the name
+  'kdotpy', and the publication year. For graphics involving content generated
+  by or with kdotpy, it is encouraged that you use the kdotpy logo bundled with
+  the project in the `docs/logo/` directory.
+
+  Presentations include oral presentations (in conferences, workshops, seminars,
+  and alike), lectures, and posters. Abstracts for presentations are considered
+  works in text form.
+
+- These instructions also apply to results from kdotpy that have been processed
+  afterwards by some other program and/or tool. In particular, if you generate
+  graphics using the data files provided by kdotpy, you must also follow the
+  instructions above.
+
+- For works in text form, it is recommended that you indicate how you used
+  kdotpy, in particular the command line arguments and configuration settings
+  being used. Also indicate clearly if substantial processing has been done
+  afterwards.
+  
+  Whether this recommendation applies depends on the nature of the work. For
+  example, for a conference abstract, it is neither customary nor useful to
+  include such amount of detail.
+  
+  For written publications such as journal articles, preprints, theses, and
+  similar works, it is generally advisable to provide these details in an
+  appendix, supplemental document, methods section, or similar. For submission
+  of data (encouraged by some journals), it is preferred that you submit at
+  least the XML data files, as these contain the metadata necessary for
+  reproducing the results. Submitting (some of) the CSV output files in
+  addition can also be useful.
+
+BibTeX entry
+------------
+
+You may use the following BibTeX entry for citing our preprint:
+```
+@unpublished{kdotpy2024preprint,
+  title = {kdotpy: \ensuremath{\mathbf{k}\cdot\mathbf{p}} theory on a lattice for simulating semiconductor band structures},
+  author = {Wouter Beugeling and Florian Bayer and Christian Berger and Jan B{\"o}ttcher and Leonid Bovkun and Christopher Fuchs and Maximilian Hofer and Saquib Shamim and Moritz Siebert and Li-Xian Wang and Ewelina M. Hankiewicz and Tobias Kie{\ss}ling and Hartmut Buhmann and Laurens W. Molenkamp},
+  year = {2024},
+  eprint = {2407.12651},
+  archivePrefix = {arXiv},
+  primaryClass = {cond-mat.mes-hall},
+  url = {https://arxiv.org/abs/2407.12651}, 
+}
+```
diff --git a/kdotpy-v1.0.0/CONTRIBUTING.md b/kdotpy-v1.0.0/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..38b5430c12d082ab697ab8317f46cc7b91fe3928
--- /dev/null
+++ b/kdotpy-v1.0.0/CONTRIBUTING.md
@@ -0,0 +1,78 @@
+Guidelines for contributors
+===========================
+
+This document summarizes some aspects and core values that the developers on
+the kdotpy project are expected to follow. These statements serve to make the
+processes of developing software and doing science as smooth and as constructive
+as possible.
+
+
+Contributors
+============
+
+Anyone who is interested in kdotpy is encouraged to contribute by providing
+feedback and suggestions, or by actively assisting in writing code, designing
+new features, and maintaining the documentation.
+The central hub for these activities is the Git repository, but also feedback
+by e-mail is welcome.
+
+All contributors are expected to act professionally and responsibly at all
+times. In the context of social interaction, it is expected that the guidelines
+spelled out by the [Berlin Code of Conduct](https://berlincodeofconduct.org) are
+followed. Contributors must also act in line with the generally accepted ideas
+of good scientific practice.
+
+All contributors must also respect further applicable laws and regulations,
+including, but not limited to:
+- The license terms and conditions of kdotpy
+- License and/or copyright terms of other works
+- The terms and conditions for using the IT infrastructure of the University of
+  Würzburg, whenever this infrastructure is used
+
+If you detect or suspect a violation of any of the above rules, please report
+to the Developer Team as soon as possible. The Developer Team shall handle such
+reports confidentially.
+
+
+Developer Team
+==============
+
+If you wish to become part of the Developer Team, contact us (the existing team)
+and let us know how you plan to contribute. When you apply to be a member of
+the Developer Team, your application shall be evaluated by the existing team.
+
+Being a developer comes with more responsibilities than 'ordinary' contributors
+have. All members of the development team are expected to act in a cooperative
+manner and in the spirit of the (longer term) project schedule. For this,
+frequent discussions among the developers are essential.
+
+
+Code contributions: AI tools and copyright 
+------------------------------------------
+
+Recently, various AI tools have become available that generate computer code, 
+among other content like text and images. While these tools may seem useful as
+an aid to develop software more efficiently, they are controversial and thus we
+strongly discourage their use for the development on kdotpy. Firstly, they may
+(sometimes inadvertently) lead to copyright infringement, since it is often
+unclear what the source of the generated content is. Secondly, the lack of
+transparency of the source of the generated content goes against the principles
+of good science as well as those of open source software.
+
+Being part of the kdotpy Developer Team means that you take full responsibility
+for the code you contribute and you certify that it does not infringe on others'
+intellectual property rights (copyright). The use of AI tools to generate code
+for kdotpy is generally not acceptable. If you use significant code fragments or
+take inspiration from online sources (e.g., stackoverflow.com), you must include
+a comment in your code with a link to the source. (Basic code which is not
+considered to be copyrightable is exempt from this requirement.)
+
+
+Contact
+=======
+
+e-mail: kdotpy@uni-wuerzburg.de
+
+website: https://kdotpy.physik.uni-wuerzburg.de
+
+Git repository: https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy
diff --git a/kdotpy-v1.0.0/Doxyfile b/kdotpy-v1.0.0/Doxyfile
new file mode 100644
index 0000000000000000000000000000000000000000..4acd48aa9f6f216d1d28094535422f7b068d69b5
--- /dev/null
+++ b/kdotpy-v1.0.0/Doxyfile
@@ -0,0 +1,2658 @@
+# Doxyfile 1.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "kdotpy"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          =
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO           = docs/logo/kdotpy-logo-64.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = docs
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all generated output in the proper direction.
+# Possible values are: None, LTR, RTL and Context.
+# The default value is: None.
+
+OUTPUT_TEXT_DIRECTION  = None
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        = src
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER         = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING       = YES
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines (in the resulting output). You can put ^^ in the value part of an
+# alias to insert a newline as if a physical newline was in the original file.
+# When you need a literal { or } or , in the value part of an alias you have to
+# escape them by means of a backslash (\), this can lead to conflicts with the
+# commands \{ and \} for these it is advised to use the version @{ and @} or use
+# a double escape (\\{ and \\})
+
+ALIASES                =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = YES
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE  = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
+# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS   = 5
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS       = 1
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = YES
+
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL   = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = YES
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = YES
+
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = NO
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = NO
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = NO
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= NO
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation. If
+# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# Possible values are: NO, YES and FAIL_ON_WARNINGS.
+# The default value is: NO.
+
+WARN_AS_ERROR          = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = src/kdotpy README.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
+# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
+# *.ucf, *.qsf and *.ice.
+
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.idl \
+                         *.ddl \
+                         *.odl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.cs \
+                         *.d \
+                         *.php \
+                         *.php4 \
+                         *.php5 \
+                         *.phtml \
+                         *.inc \
+                         *.m \
+                         *.markdown \
+                         *.md \
+                         *.mm \
+                         *.dox \
+                         *.py \
+                         *.pyw \
+                         *.f90 \
+                         *.f95 \
+                         *.f03 \
+                         *.f08 \
+                         *.f18 \
+                         *.f \
+                         *.for \
+                         *.vhd \
+                         *.vhdl \
+                         *.ucf \
+                         *.qsf \
+                         *.ice
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = ./README.md
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# entity all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see https://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see:
+# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
+# performance. This can be particularly helpful with template rich C++ code for
+# which doxygen's built-in parser lacks the necessary type information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse_libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to
+# YES then doxygen will add the directory of each input to the include path.
+# The default value is: YES.
+
+CLANG_ADD_INC_PATHS    = YES
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
+# If clang assisted parsing is enabled you can provide the clang parser with the
+# path to the directory containing a file called compile_commands.json. This
+# file is the compilation database (see:
+# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
+# options used when the source files were built. This is equivalent to
+# specifying the -p option to a clang tool, such as clang-check. These options
+# will then be passed to the parser. Any options specified with CLANG_OPTIONS
+# will be added as well.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse_libclang=ON option for CMake.
+
+CLANG_DATABASE_PATH    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 212
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 110
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consist of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See
+# https://developer.apple.com/library/archive/featuredarticles/DoxygenXcode/_index.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see:
+# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the main .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT    = png
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE      =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see:
+# https://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see:
+# https://xapian.org/). See the section "External Indexing and Searching" for
+# details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when not enabling USE_PDFLATEX the default is latex, and when
+# enabling USE_PDFLATEX the default is pdflatex; if in the latter case latex is
+# chosen, it is overwritten by pdflatex. For specific output languages the
+# default can have been set differently, this depends on the implementation of
+# the output language.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = xelatex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# Note: This tag is used in the Makefile / make.bat.
+# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
+# (.tex).
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
+# generate index for LaTeX. In case there is no backslash (\) as first character
+# it will be automatically added in the LaTeX code.
+# Note: This tag is used in the generated output file (.tex).
+# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
+# The default value is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_MAKEINDEX_CMD    = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         = [utf8]{inputenc}
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
+# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
+# files. Set this option to YES, to get a higher quality PDF documentation.
+#
+# See also section LATEX_CMD_NAME for selecting the engine.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = YES
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP        = NO
+
+# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
+# path from which the emoji images will be read. If a relative path is entered,
+# it will be relative to the LATEX_OUTPUT directory. If left blank the
+# LATEX_OUTPUT directory will be used.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EMOJI_DIRECTORY  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# configuration file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's configuration file. A template extensions file can be
+# generated using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
+# namespace members in file scope as well, matching the HTML output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_NS_MEMB_FILE_SCOPE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# the structure of the code including all documentation. Note that this feature
+# is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: YES.
+
+HAVE_DOT               = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
+# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
+# tag is set to YES, doxygen will add type and arguments for attributes and
+# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
+# will not generate fields with class member information in the UML graphs. The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS        = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly, it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD     = 17
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH      =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE      =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH  =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
+# files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc and
+# plantuml temporary files.
+# The default value is: YES.
+
+DOT_CLEANUP            = YES
diff --git a/kdotpy-v1.0.0/LICENSE b/kdotpy-v1.0.0/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..88922f9e5a74bb0d42108eba9f4cb3378e82e4be
--- /dev/null
+++ b/kdotpy-v1.0.0/LICENSE
@@ -0,0 +1,675 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+ 
diff --git a/kdotpy-v1.0.0/LICENSE.additional b/kdotpy-v1.0.0/LICENSE.additional
new file mode 100644
index 0000000000000000000000000000000000000000..ee94edff06e08eb7158e93fe26df1519fdb47208
--- /dev/null
+++ b/kdotpy-v1.0.0/LICENSE.additional
@@ -0,0 +1,22 @@
+Under Section 7 of GPL version 3 we require you to fulfill the following
+additional terms:
+
+    - We require the preservation of the full copyright notice and the license
+      in all original files.
+
+    - We prohibit misrepresentation of the origin of the original files. To
+      obtain the original files, please visit the Git repository at
+      <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+
+    - As part of a scientific environment, we believe it is reasonable to
+      expect that you follow the rules of good scientific practice when using
+      kdotpy. In particular, we expect that you credit the original authors if
+      you benefit from this program, by citing our work, following the
+      citation instructions in the file CITATION.md bundled with kdotpy.
+
+    - If you make substantial changes to kdotpy, we strongly encourage that
+      you contribute to the original project by joining our team. If you use
+      or publish a modified version of this program, you are required to mark
+      your material in a reasonable way as different from the original
+      version.
+
diff --git a/kdotpy-v1.0.0/README.md b/kdotpy-v1.0.0/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f2ac8a710814169064a27b5653cf271c44283c61
--- /dev/null
+++ b/kdotpy-v1.0.0/README.md
@@ -0,0 +1,172 @@
+**kdotpy** is a Python application for simulating electronic band structures of
+semiconductor devices with k·p theory on a lattice.
+
+
+Installation
+============
+
+You can use `pip` to install kdotpy directly from the repository:
+```sh
+python3 -m pip install git+ssh://git@git.physik.uni-wuerzburg.de/kdotpy/kdotpy.git
+```
+
+Alternatively, you can download the source and install it from your local copy:
+```sh
+git clone https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy.git
+python3 -m pip install ./kdotpy
+```
+
+For an editable install for active coding and debugging, add the `-e` option to
+`pip install`, like so: `python3 -m pip install -e ./kdotpy`.
+
+
+Usage
+=====
+
+kdotpy is designed as a standalone application with a command line interface.
+If you have followed the installation instructions above, you can simply run
+`kdotpy` from the command line, followed by the 'sub-programme' label and
+further arguments. You can do this from any folder.
+
+The first argument is always the sub-programme. The calculation scripts are
+`kdotpy 1d`, `kdotpy 2d`, `kdotpy bulk`, `kdotpy ll`, and `kdotpy bulk-ll`.
+There are two re-plot scripts, `kdotpy merge` and `kdotpy compare`. Batch
+calculations can be done with `kdotpy batch`. The scripts `kdotpy config`,
+`kdotpy help`, and `kdotpy doc` give access to configuration and information.
+Finally, `kdotpy test` runs pre-defined tests for checking that kdotpy works
+correctly.
+
+You can also use `python3 -m kdotpy` followed by the sub-programme and further
+arguments.
+
+
+Example
+-------
+```sh
+kdotpy 2d 8o noax msubst CdZnTe 4% mlayer HgCdTe 68% HgTe HgCdTe 68% llayer 10 7 10 zres 0.25 k -0.6 0.6 / 60 kphi 45 erange -80 0 split 0.01 obs orbitalrgb legend char out -7nm outdir data-qw localminmax
+```
+
+This and more examples can be found in the Tutorials section of the Wiki:
+https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy/-/wikis/tutorials/overview
+
+
+
+More information
+================
+
+**Repository**:
+https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy
+
+**Wiki**:
+https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy/-/wikis/home
+
+**Website**:
+https://kdotpy.physik.uni-wuerzburg.de
+
+
+Authors
+=======
+
+The following people are members of the kdotpy collaboration.
+
+Maintainers and developers:
+- **Wouter Beugeling**
+- **Florian Bayer**
+- **Christian Berger**
+- **Maximilian Hofer**
+
+Other contributors:
+- Jan Böttcher
+- Leonid Bovkun
+- Christopher Fuchs
+- Saquib Shamim
+- Moritz Siebert
+- Li-Xian Wang
+- Ewelina M. Hankiewicz
+- Tobias Kießling
+- Hartmut Buhmann
+- Laurens W. Molenkamp
+
+We thank Domenico Di Sante, Giorgio Sangiovanni, Björn Trauzettel, Florian Goth,
+and Fakher Assaad for feedback and support at various stages of the project.
+
+We acknowledge financial support from the Deutsche Forschungsgemeinschaft (DFG,
+German Research Foundation) in the project SFB 1170 *ToCoTronics* and in the
+Würzburg-Dresden Cluster of Excellence on Complexity and Topology in Quantum
+Matter *ct.qmat* (EXC 2147), and from the Free State of Bavaria for the
+Institute for Topological Insulators.
+
+
+Crediting us
+------------
+
+If you use kdotpy, we encourage you to credit our work as you would do with any
+scientific work. Please cite us as follows:
+> W. Beugeling, F. Bayer, C. Berger, J. Böttcher, L. Bovkun, C. Fuchs, 
+> M. Hofer, S. Shamim, M. Siebert, L.-X. Wang, E. M. Hankiewicz, T. Kießling,
+> H. Buhmann, and L. W. Molenkamp,
+> "kdotpy: k·p theory on a lattice for simulating semiconductor band structures",
+> arXiv: 2407.12651 (2024)
+
+We also encourage you to show the kdotpy logo with graphics you present, for
+example in oral presentations and on posters.
+
+For detailed instructions, please refer to the document `CITATION.md` in the
+repository.
+
+
+Contributing
+============
+
+We encourage interaction (bug reports, suggestions, etc.) via the issue tracker
+of the repository:
+https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy/-/issues
+
+We can also be reached by e-mail at `kdotpy@uni-wuerzburg.de`.
+
+
+Becoming a contributor
+----------------------
+
+For bug reports, suggestions, and criticisms just let us know via the issue
+tracker or by e-mail.
+
+We're also looking for enthusiastic people who want to join our Developer Team.
+If you're interested in joining, please don't hesitate to let us know.
+
+For information on what we expect from contributors, please note the terms
+stated in `CONTRIBUTING.md` in the repository.
+
+
+License
+=======
+
+kdotpy is licensed under the GNU General Public License, version 3.
+
+> Copyright (C) 2024 The kdotpy collaboration
+>
+> kdotpy is free software: you can redistribute it and/or modify it under the
+> terms of the GNU General Public License as published by the Free Software
+> Foundation, version 3.
+>
+> kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+> A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
+>
+> You should have received a copy of the GNU General Public License along with
+> kdotpy.  If not, see <https://www.gnu.org/licenses/>.
+
+A copy of the GNU General Public License is included as the file `LICENSE` in
+the kdotpy repository. Additional terms under Section 7 of the GNU General
+Public License, version 3, are stated in the file `LICENSE.additional`.
+
+
+Contact
+=======
+
+e-mail: kdotpy@uni-wuerzburg.de
+
+website: https://kdotpy.physik.uni-wuerzburg.de
+
+Git repository: https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy
+
diff --git a/kdotpy-v1.0.0/changelog.txt b/kdotpy-v1.0.0/changelog.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d0917af8a48f2d6ceacfb5d481ba23c74563226
--- /dev/null
+++ b/kdotpy-v1.0.0/changelog.txt
@@ -0,0 +1,1454 @@
+Change log for kdotpy
+=====================
+
+
+v1.0.0 (official release) (2024-07-18)
+--------------------------------------
+
+(changes since v0.95)
+
+Bug fixes:
+- Optimize eigenvalue stitching
+- Fix incorrect union and intersection operations on (energy) intervals
+- Workaround for random matplotlib bug involving contour labels
+- Fix problem with inadvertently changed property obsids in DiagDataPoint
+- Use correct length scaling when calculating derivatives of band structure
+parameters exactly (dz = 0)
+- Fix problems with colours in legends
+- Fix problem with ETransform with plotstyle normal
+- Catch rare case for kdotpy 1d where list of wave function locations is empty
+- Fix bug in 19-point extremum solver (3D)
+- Fix error in finding CNP when band characters are unavailable
+- Raise warning about using CuPy solver with kdotpy 2d
+- Fix empty unit string in legend for dual observable
+- Disable BHZ plot if figure is absent
+- Fix problem with extrema for spherical coordinates
+- Fix handling of radians as angular units for extrema
+- Do not reject valid input in imported colormaps
+- Fix inconsistent composition of columns in transitions table
+- Fix error in adiabatic band alignment if zero energy is undefined
+- Fix units in transitions output
+
+Implementation changes:
+- Packaging: turn kdotpy into a package that can be installed with pip
+- Update version requirements for Python, NumPy, SciPy, and Matplotlib; update
+code accordingly
+- Replace shell scripts by Python implementations (increase OS compatibility)
+- Clean up unused code; delete unused variables and function arguments
+- Restructure program, simplify internal dependencies (imports)
+- Improved plot customization: Use Matplotlib style files instead of
+matplotlibrc; use rcparams more consistently
+- Improved colormap handling
+- Streamline csv output: Separate construction and writing of data
+- Improved, but stricter checking on CNP
+- Optimize band alignment
+- Remove restrictions on broadening; enable B-dependent broadening for
+self-consistent Hartree in LL mode
+- Change some default configuration settings
+- Remove deprecated command-line arguments
+
+New features:
+- Material parameter files: Material parameters are no longer hardcoded, but
+provided by the user as separate files in the configuration directory (more
+details in Wiki)
+- Selfconsistent Hartree with full diagonalization (dispersion and LL mode)
+- Support for JAX eigh diagsolver
+- Band alignment succeeds without CNP being defined
+- Doxygen generated developer documentation
+- Infrastructure for configuration key deprecation
+
+
+v1.0.0rc3 (release candidate) (2024-07-17)
+------------------------------------------
+Bug fixes:
+- Correct project metadata
+
+New features:
+- Activate Doxygen pages
+
+
+v1.0.0rc2 (release candidate) (2024-07-17)
+------------------------------------------
+Bug fixes:
+- Fix error in adiabatic band alignment if zero energy is undefined
+- Fix units in transitions output
+
+Implementation changes:
+- Streamline some code differences between source code and article
+- Update project metadata
+
+New features:
+- Infrastructure for configuration key deprecation
+
+
+v1.0.0rc1 (release candidate) (2024-05-22)
+------------------------------------------
+Bug fixes:
+- Fix inconsistent composition of columns in transitions table
+
+Implementation changes:
+- Update authors, acknowledgements, citation instructions
+- Delete tests for 'symbolic objects'
+- Fix some out-of-date comments, docstrings, and helpfile entries
+
+
+v1.0.0b2 (beta) (2024-04-23)
+----------------------------
+Bug fixes:
+- Optimize eigenvalue stitching
+- Fix incorrect union and intersection operations on (energy) intervals
+- Workaround for random matplotlib bug involving contour labels
+- Fix problem with inadvertently changed property obsids in DiagDataPoint
+- Use correct length scaling when calculating derivatives of band structure
+parameters exactly (dz = 0)
+- Fix problems with colours in legends
+- Fix problem with ETransform with plotstyle normal
+- Catch rare case for kdotpy 1d where list of wave function locations is empty
+- Fix bug in 19-point extremum solver (3D)
+- Fix error in finding CNP when band characters are unavailable
+- Raise warning about using CuPy solver with kdotpy 2d
+- Fix empty unit string in legend for dual observable
+- Disable BHZ plot if figure is absent
+- Fix problem with extrema for spherical coordinates
+- Fix handling of radians as angular units for extrema
+- Do not reject valid input in imported colormaps
+
+Implementation changes:
+- Packaging: turn kdotpy into a package that can be installed with pip
+- Update version requirements for Python, NumPy, SciPy, and Matplotlib; update
+code accordingly
+- Replace shell scripts by Python implementations (increase OS compatibility)
+- Clean up unused code; delete unused variables and function arguments
+- Restructure program, simplify internal dependencies (imports)
+- Improved plot customization: Use Matplotlib style files instead of
+matplotlibrc; use rcparams more consistently
+- Improved colormap handling
+- Streamline csv output: Separate construction and writing of data
+- Improved, but stricter checking on CNP
+- Optimize band alignment
+- Remove restrictions on broadening; enable B-dependent broadening for
+self-consistent Hartree in LL mode
+- Change some default configuration settings
+- Remove deprecated command-line arguments
+
+New features:
+- Material parameter files: Material parameters are no longer hardcoded, but
+provided by the user as separate files in the configuration directory (more
+details in Wiki)
+- Selfconsistent Hartree with full diagonalization (dispersion and LL mode)
+- Support for JAX eigh diagsolver
+- Band alignment succeeds without CNP being defined
+- Doxygen generated developer documentation
+
+
+v0.95  (2023-10-16)
+-------------------
+Bug fixes:
+*  Silence a warning for dispersion derivatives
+*  Fix legend label for observable band index
+
+Implementation changes:
+*  Legacy self-consistent solvers have been removed
+*  Use notation EF and EF0 for Fermi energies at finite and zero density
+*  Change internal density values in dispersion mode to proper DOS
+*  Set density units by a configuration value, no longer from the command line
+*  Load potential before starting self-consistent solver in LL mode
+*  Apply solver options more consistently
+*  Debug output for all magnetic fields in self-consistent solver in LL mode
+
+New features:
+*  Additional colour options for plots
+
+
+v0.94  (2023-08-07)
+-------------------
+Implementation changes:
+*  Extrema calculation for polar coordinates implemented more intuitively
+
+New features:
+*  Selfconsistent Hartree calculation in LL mode
+*  Extrema solvers for cylindrical and spherical coordinates
+
+
+v0.93  (2023-06-28)
+-------------------
+Bug fixes:
+*  Handle errors in densityz calculation
+
+Implementation changes:
+*  Optimize parsing of configuration files
+*  Change how strain is entered on command line and optimize handling of strain
+
+New features:
+*  Add plots and tables for density as function of z and energy
+
+Deleted features:
+*  Input of 'strain <value> x' is no longer possible because the result is
+unexpected. Anisotropic in-plane strain will be reintroduced in a future
+version.
+
+
+v0.92  (2023-05-26)
+-------------------
+"Redesign of selfcon"
+
+Bug fixes:
+*  Make wave function plot functions more uniform, fixing argument errors
+*  Fix problems with Jacobians (for derivatives)
+*  Abort density calculation if integration element is not defined, instead of
+quitting with an exception
+*  Handle cases where Fermi energy is out of range
+*  Fix error in transitions output if refractive index is undefined
+*  In configuration file (kdotpyrc), no longer erase unknown configuration keys
+and retain order and comments
+
+Implementation changes:
+*  Generalize density calculation to more grid types / coordinate combinations
+*  Use the same default density ranges for all 'constdens' outputs
+
+New features:
+*  Complete redesign of self-consistent Hartree method (for dispersion mode
+only). This update fixes many stability issues due to a much improved algorithm
+for calculating the density as function of z.
+*  Add csv file for 'dispersion-vs-n'
+*  Optionally print momentum extension multiplier for DOS/IDOS
+
+
+v0.91  (2023-03-15)
+-------------------
+Bug fixes:
+*  Fix misinterpretation of command line in kdotpy-test in Windows
+*  Add 'python' to command line in Windows because kdotpy-xxx is not executable
+
+Implementation changes:
+*  Delete fallbacks for matplotlib colormaps viridis and cividis
+*  Calculate derivatives dE/dkx for grid of type 'x', dE/dky for 'y'
+*  Reimplement derivatives, now allow all grid types
+
+New features:
+*  Add command line configuration tool kdotpy-config
+*  Add cubic Zeeman term (not yet active in code) 
+
+
+v0.90  (2022-12-20)
+-------------------
+Bug fixes:
+*  Do not create directories when using kdotpy-batch with dryrun
+*  Fix incorrect band character labels in csv dispersion output
+*  Fix incompatibilities with newer numpy versions
+
+Implementation changes:
+*  Use minimum x resolution for all DOS output
+
+New features:
+*  Add markers periodically in 1/B in Shubnikov-de Haas plots
+*  Define isoparity operators in well and in symmetric region around well
+*  Define splittype for these modified isoparity operators
+*  Use kdotpy-test with alternative python command
+
+
+v0.89  (2022-11-22)
+-------------------
+Bug fixes:
+*  Fix scientific notation and crop small values in plot parameter text
+*  Fix incorrectly formed file names for wave function csv files
+*  Fix missing axis labels in DOS and IDOS plots
+*  Place special energies at the correct positions in dispersion-vs-n plot
+*  Fix incorrectly formatted directory names in kdotpy-batch
+
+Implementation changes:
+*  Make determination of charge neutrality point more stable against subbands
+with incorrect character labels
+*  In plot parameter text, display one-component vector as scalar
+*  Make script kdotpy-test.py executable
+*  Argument 'dosrange' (scaling for DOS and IDOS plots) now interprets values in
+the units set by 'densitynm', 'densitypcm', etc.
+*  Apply argument 'dosrange' also to dispersion-vs-n plot
+
+New features:
+*  Allow variable substitution with {}-formatting for outputid
+*  Allow grid (momentum, magnetic field) variables to be used in {}-formatted
+command-line inputs ('plottitle', 'outputid')
+*  Output of psi(z, y) as csv file (kdotpy-1d with 'plotwf')
+
+
+v0.88.1  (2022-11-04)  (hotfix)
+-------------------------------
+Bug fixes:
+*  Fix bug with composing file names (for example wave functions)
+
+
+v0.88  (2022-11-03)
+-------------------
+Bug fixes:
+*  Silence 'division by zero' warning for table output
+*  Fix misplacement of character labels in dispersion plots at integer energies
+
+Implementation changes:
+*  Add optional Python modules to XML output
+*  Change some functions requiring 'zero point' to 'base point' (less
+restrictive)
+*  Rename 'reconnect' command line argument to 'bandalign'
+*  Update version requirements for Python and packages
+
+New features:
+*  Add arguments 'showcmd+' and 'showcmd-' to kdotpy-test, changing how the
+script names are shown in the command lines
+*  Use command line argument 'bandalign' with kdotpy-2d and kdotpy-ll
+*  Make vertical spacing of character labels in dispersion plots configurable
+
+
+v0.87  (2022-10-11)
+-------------------
+Bug fixes:
+*  Fix exception for plotting an axis label
+*  Fix inconsistent location matching for wave functions, especially at zero
+
+Implementation changes:
+*  Simplification of integrated observable calculation
+*  Rename this file kdotpy-changelog.txt to changelog.txt; Gitlab will then
+recognize it as this project's changelog.
+*  Use position labels for wave function filenames
+
+New features:
+*  Combine multiple broadening functions
+*  Direct input of Berry/Chern/Hall broadening from the command line
+*  Multiple input values for targetenergy (experimental feature)
+*  Save binary files with eigenvectors in NumPy .npz or in HDF5 .h5 format
+
+
+v0.86  (2022-09-14)
+-------------------
+Bug fixes:
+*  Choose correct vector component prefix in band alignment
+*  Fix scaling issues in density plots
+*  Fermi energy is indeterminate if IDOS is identically zero
+*  Do not attempt BHZ calculation if k = 0 is not in data
+*  Fix file permission error on Windows for parallel processes, caused by 
+unnecessary rewrite of configuration file.
+*  Fix missing band character labels for wave functions at k = 0
+
+Implementation changes:
+*  Apply phase normalization for wave functions more consistently
+*  Make quantity, value, unit output for plots and tables more consistent and
+uniform.
+
+New features:
+*  Add configuration option to make wave functions real before counting nodes to
+determine subband character.
+
+
+v0.85  (2022-08-04)
+-------------------
+Bug fixes:
+*  Fix missing factor 2 in absorption calculation (transitions)
+
+Implementation changes:
+*  Reduce verbose output to a more practical amount
+*  Get version info for output.xml directly from kdotpy, not from a shell script
+*  Broadening (type thermal) is no longer implicit for dispersion mode, which
+increases consistency with LL mode
+*  Order of broadening arguments is now arbitrary
+
+New features:
+*  Logo
+
+
+v0.84  (2022-07-12)
+-------------------
+Bug fixes:
+*  Fix broadening with magnetic field dependence in postprocessing
+*  Catch unhandled errors in band alignment and continue without band alignment
+rather than abort with an exception
+*  Fix incorrect handling of in-plane magnetic fields for symbolic Hamiltonians
+(used in LL mode and for BHZ calculation)
+*  Deal with out-of-range Fermi energy more elegantly
+*  Deal with some other density calculation failures more elegantly
+*  Fix inconsistent vector components for x values in bdependence-vs-n plot
+
+
+v0.83  (2022-07-07)
+-------------------
+"Great density infrastructure cleanup 2022"
+
+Implementation changes:
+*  Code for density of states etc. has been refactored and reorganized for
+easier maintenance
+*  Separate integrated DOS calculation and subsequent methods (like determining
+Fermi energy)
+*  Integrated DOS is now stored with k, B values, energies, dimensionality, etc.
+as a single data object
+*  More uniform handling of broadening. Density functions no longer accept
+temperature as an argument for thermal broadening.
+*  More uniform and predictable definition of energy ranges
+*  Density functions no longer attempt to do band alignment when band indices
+are not provided
+*  Optimizations in postprocessing (PDF and CSV output)
+
+
+v0.82  (2022-07-04)
+-------------------
+Implementation changes:
+*  Code optimizations using dict.get()
+*  Formatting of units
+
+New features:
+*  Option 'showcmd' for kdotpy-test for showing command lines of tests
+*  Interface observable with customizable length
+
+
+v0.81  (2022-06-13)
+-------------------
+Implementation changes:
+*  Use a uniform threshold value for testing k = 0 in degeneracy splitting
+Hamiltonians
+
+New features:
+*  Temperature dependent material parameters, in particular the band gap
+*  New degeneracy splitting type sgnjz0 (sgn(Jz) at k = 0 only)
+
+
+v0.80  (2022-06-07)
+-------------------
+Bug fixes:
+*  Fix artifacts in polar plot if angles do not align with 90 degree multiples
+*  Fix exception when trying to apply scaling if broadening is not set
+*  Silence warning when calling gitv on directory without writing permission
+*  Update .gitlab-ci.yml to fix incompatibility with newer Gitlab version
+
+Implementation changes:
+*  It has become mandatory to indicate either 'ax' or 'noax'
+*  Lattice regularization demoted from command-line argument to configuration
+value.
+*  Raise error or warning if z resolution and layer thicknesses do not align
+
+
+v0.79  (2022-05-10)
+-------------------
+Bug fixes:
+*  Get new version for XML output also outside script directory
+*  Apply energy resolution also to IDOS/DOS in dispersion mode
+*  Fix misalignment of potential when centre (z = 0) is not in z grid
+*  Implement IDOS/DOS validity range more accurately
+*  Handle failed band alignment (zero momentum not in range) more properly
+
+Implementation changes:
+*  New internal handling of IDOS/DOS broadening (more flexibility in future)
+*  Further optimizations for IDOS/DOS handling
+*  Optimize dispersion csv output, significant speed increase
+
+New features:
+*  Configuration options for job monitor and for numpy (debug) output
+*  Calculate IDOS/DOS for bulk (3D)
+*  New test '2d_offset' for dispersions that do not contain zero momentum
+
+
+v0.78  (2022-03-31)
+-------------------
+Bug fixes:
+*  Fix postprocessing error if density range is not set
+*  Fix polar grid being invisible
+*  Fix error on missing band characters in extrema csv file
+*  Handle incommensurate energy and IDOS arrays for integrated observable plots
+
+Implementation changes:
+*  Change matrix type from numpy.matrix to numpy.array, following general
+recommendations. Also leads to speed increase of ~ 5%-10%.
+*  Radial ticks in polar plots also for small radii
+*  Make threshold for hiding material labels in wave function plots configurable
+*  Distinguish between Berry curvature and Chern number terminology for LL mode
+*  Change file names for Hall conductance output
+
+New features:
+*  Write units into 'byband' dispersion csv files
+*  Write data labels and units into generic postprocessing csv files (local DOS,
+Berry curvature, etc.)
+*  Simulated Chern number (Berry curvature, Hall conductance) in LL mode
+*  Integrated observable plot over all data points as multipage PDF 
+
+
+v0.77 (2022-03-09)
+------------------
+Bug fixes:
+*  For determination of LL degeneracy, use z component of magnetic field B
+*  Do not ignore potential file if it is the final command line argument
+*  Remove spurious lines in dispersion plots
+*  Fix error when setting stacking order with plotstyle 'normal'
+
+Implementation changes:
+*  Uniform implementation for defining arrays of z and y coordinates
+*  Add extrema calculation to tests
+
+New features:
+*  Add density(z) as postprocessing function (kdotpy-ll only)
+*  Rotation of material labels in wave function plots
+
+
+v0.76 (2022-02-23)
+------------------
+Bug fixes:
+*  Do not scale z values of berry-int and dsigma-de plots
+*  Signal handling in Tasks and Model structure, fixes issues with Ctrl-C, etc.
+
+Implementation changes:
+*  In Rxy plot, draw Hall slope below the data curves
+*  Option for more optimal stacking order of plot elements (dispersion etc.)
+
+New features:
+*  Allow non-exact match of wave function locations of numerical type
+*  Input of multiple potential files and multipliers for them
+
+
+v0.75 (2022-02-08)
+------------------
+Bug fixes:
+*  Fix incorrect coefficient for in-plane Zeeman matrix element (expected error
+is ~ 10^-3 meV or lower for in-plane fields ~ 1 T)
+
+Implementation changes:
+*  Optimize how by-band data is extracted from internal data structures
+DiagDataPoint and DiagData. This reduces postprocessing time dramatically for
+large amounts of data points.
+*  Adapt formatting of BHZ verbose output
+
+
+v0.74  (2022-02-01)
+-------------------
+Bug fixes:
+*  Fix incorrect LL labels in plotted wave functions in 'full' LL mode
+*  Fix momentum handling in extrema calculation; add units to extrema output
+*  Fix and update output of special energy values
+*  Apply observable property 'dimful' (multiprocessing issue on Windows)
+*  Fix issue for automatic solver configuration
+*  Fix wave function phases for full LL mode
+*  Fix missing values in overlap calculation (kdotpy-ll)
+*  Do not discard eigenvector data if symmetrization is used. This issue caused
+missing wave function output.
+*  Correct density value off by factor of 2 for dispersions on symmetric
+one-dimensional momentum grid
+*  Fix crash in numeric DOS calculation for very small number of data points
+
+Implementation changes:
+*  Replace command line argument 'vgate' by 'v_outer' and 'v_inner'
+*  Print 'tempout' folder to stderr
+*  Make precision of wave function csv output independently configurable
+*  For LL mode, output 'densz' plots for all magnetic field values, not just the
+highest value.
+*  Choose gauge for in-plane fields symmetrically in z direction
+*  Change representation of floating-point numbers (kdotpy-batch)
+*  Apply command line options 'densityrange' and 'cardens' (given as range) also
+for integrated Berry curvature in LL mode
+*  Shorten command (script path) in xml output <cmdargs> tag (optionally)
+*  Separate sub-tasks for a data point can be combined into a single task
+(mitigates problems for very large matrices exceeding 2 GB of data)
+
+New features:
+*  Plot wave functions as real functions and indicate complex phase
+(configuration option 'plot_wf_orbitals_realshift')
+*  Choose order of orbital legend in wave function plot
+*  In-plane gauge fields (kdotpy-2d only)
+*  Add Hall slope in Rxy plots (optionally)
+*  Verbose mode for tests (kdotpy-test)
+*  BHZ calculation (Löwdin expansion) at nonzero momentum
+
+
+v0.73  (2021-12-10)
+-------------------
+Bug fixes:
+*  Issue with multiple index rows in csv output using pandas
+*  Fix double-counting issue for 'interface' observables, if the integration
+intervals overlap.
+*  Optimize matplotlib output fixing compatibility issues and warnings for newer
+matplotlib versions.
+
+Implementation changes:
+*  Relabel test '2d_orient'
+
+New features:
+*  Output of energies and densities to stdout (DOS for dispersions)
+*  Enter 'kdotpy test list' for a list of test ids.
+
+
+v0.72  (2021-12-02)
+-------------------
+Bug fixes:
+* Fix Windows compatibility issues (file access, absence of 'nice' command)
+* In symbolic LL mode, do not calculate same transitions twice and ensure both
+states are within 'transitionsrange'.
+* Fix incorrect sorting of transitions by energies E_2 and for duplicate
+entries.
+* Do not show strip length scale output for LL mode.
+* Fix issue with csv output of vector data.
+* Fix some rare warning messages that were incorrect.
+
+Implementation changes:
+* Automatic solver configuration chooses known better available solvers or gives
+a hint for solvers that might perform better.
+* New Tasks and Model structure to distribute calculations more flexibly to CPU
+and GPU (using threading and multiprocessing workers).
+* LL mode 'legacy' repaired/modified; splitting is now only applied at zero
+magnetic field.
+* Add band character data also for LL mode 'full'.
+
+New features:
+* Eigensolver for Hamiltonian matrices using CUDA capable GPUs (based on
+optional cupy package).
+* Save intermediate diagonalization results in a temporary folder and reload
+them for resumable script runs. (Use with care!)
+
+
+v0.71  (2021-11-30)
+-------------------
+Bug fixes:
+* Add missing 'byband' output for kdotpy-bulk-ll
+* Fix error in wave function plots if band indices are undefined
+* Fix git version check on Windows systems
+
+Implementation changes:
+* Configure extension of stdout and stderr files in kdotpy-batch
+
+New features:
+* For calculating overlaps, allow band indices for choosing states with missing
+character label.
+* Allow plotting wave functions everywhere using 'plotwf all'
+* Add indexed observables orbital[j] and ll[j]
+* Add observables Hzeeman at 1T, Hexchange at 1T, Hexchange at ∞T
+* Add observables LL index mod 2 and LL index mod 4 (full LL mode only)
+
+
+v0.70  (2021-10-20)
+-------------------
+Bug fixes:
+* Improve data alignment in plots using Matplotlib's pcolormesh or imshow
+* Disable automatic path simplification (dispersion plots, etc.)
+* Prevent problems with configuration files in multiprocessing on Windows
+* Do not try to 'nice' processes in batch run on Windows
+* Repair kdotpy-ll.py, legacy LL mode
+* Repair kdotpy-bulk-ll.py, all LL modes
+
+Implementation changes:
+* Make transitions spectra optional
+* Parallel processing in output of transitions and spectra (performance
+improvement)
+* In density plots, define energy resolution more predictably
+* Improve speed in matching vector values, leading to faster merging of data
+grids from multiple XML files
+* Update test framework, make it compatible with pytest for automated testing in
+Gitlab.
+* New colormap for band indices
+
+New features:
+* Plot integrated observable and observable density (~ spectral function)
+* Symbolic and full Landau level modes for kdotpy-bulk-ll.py
+
+
+v0.69  (2021-08-09)
+-------------------
+Bug fixes:
+* Prevent crash if there are no optical transitions for a magnetic field point.
+* Fix misleading error message in case of empty optical transitions.
+* Fix error causing bad axis tick spacing for spectra plots.
+* Remove several small issues found with flake8
+
+Implementation changes:
+* Now considers all transition matrix elements for optical transitions. Also
+calculates refractive indices and polarimetry spectra (experimental).
+* Float output format for transitions csv files changed to %.6g for a more
+consistent number of significant digits.
+
+New features:
+* Optical transitions can be requested for a range of carrier densities.
+* Hamiltonian diagonalization solvers are now configurable. This can be used to
+improve solve times or enhance solver stability in certain use cases - mostly
+with large system sizes. Adds support for UMFPACK, PARDISO and FEAST, but
+requires additional libraries and packages for use of those features.
+
+
+v0.68  (2021-07-21)
+-------------------
+Bug fixes:
+* Correct TeX strings of observables hexch, hzeeman, hstrain.
+* Fix misplaced charge neutrality point in case of unexpected band characters.
+* Turn several repeated warnings into single ones.
+
+Implementation changes:
+* For kdotpy-2d and kdotpy-ll, use eigenvectors from dispersion calculation for
+  wave function plots/data. No longer recalculate them.
+* Further improve text information in wave function plots.
+* Use a more systematic file naming scheme for wave function csv files.
+* Update BHZ calculation.
+
+New features:
+* Allow BHZ for noncontiguous sets of A bands.
+* Allow BHZ for finite magnetic fields (experimental).
+* Allow band indices as input for BHZ.
+
+
+v0.67  (2021-06-24)
+-------------------
+Bug fixes:
+* Add wave functions to full LL mode. This missing feature was causing an error
+when trying to use full LL mode.
+* Fix missing symbols in BHZ TeX output
+
+Implementation changes:
+* Improve text in wave function plots
+* Add configuration option for matching wave function locations approximately
+* Add configuration option for doing BHZ on non-contiguous sets of A bands
+
+
+v0.66  (2021-05-27)
+-------------------
+Bug fixes:
+* Signal handling (interrupt/terminate) changed for kdotpy-batch, preventing
+orphaned processes
+* Fix several small errors in determination of band type
+* Fix incorrect signs in symbolic derivation and Berry curvature
+
+Implementation changes:
+* Command-line argument 'extrema' replaces 'localminmax' (same option, new name)
+* Determine eigenstate complex phase more predictably (for wave function plots
+and BHZ)
+* Cleaner BHZ output
+
+New features:
+* Vector components and length in plot title
+* Show phase values in wave function plots
+* Configure colour and style of BHZ dispersion plot
+* Wave function plots in LL modes, also for nonzero magnetic field
+
+
+v0.65  (2021-01-13)
+-------------------
+Bug fixes:
+*  Fix warning/error with neutral energies
+*  Fix error caused by duplicate observable (kdotpy-2d)
+*  Problems with Berry curvature plots
+
+Implementation changes:
+*  Save B dependence plot with density curves to separate file, instead of
+overwriting the plot without density curves
+
+New features:
+*  More degeneracy splitting types
+*  Command-line shortcut 'hall' for kdotpy-ll
+*  Interface-type observables also made available in kdotpy-ll
+*  Add parity and isoparity observables in x direction
+
+
+v0.64  (2020-10-27)
+-------------------
+Bug fixes:
+*  Several issues with band alignment
+*  Fix problem with density scaling
+*  Use the correct definition of observable 'isopy'
+*  Fix bug in plotting of DOS contours
+*  Path to script in tests.sh
+
+Implementation changes:
+*  Updates to wave function plots (orbital and subband basis)
+*  Updates to BHZ output (see also 'new feature' below)
+*  Rename 'Fermi energy' to 'charge neutrality point' wherever appropriate
+*  Wave function plot: Use dashdot style when real and imaginary curves
+coincide.
+*  Define degeneracy splitting as separate sparse matrix
+
+New features:
+*  Add band labels in 1D dispersion plots
+*  Potential depending on orbital or subband
+*  Table output for 1D wave functions in y
+*  Dual shaded colormaps
+*  Import colormap from file
+*  Landau diagonalization at negative magnetic fields
+*  Choose between dimensionless and -ful g factors in BHZ output
+*  Calculate Berry curvature and DOS per isoparity block
+*  Degeneracy splitting using isoparity
+
+
+v0.63  (2020-02-25)
+-------------------
+Bug fixes:
+*  Remove several small issues found with flake8
+*  Fix erroneous warning for unparsed argument
+*  Add __name__ == "__main__" test to kdotpy-merge and kdotpy-compare
+
+Implementation changes:
+*  Change method of finding band character (number of nodes) and make the
+parameters configurable.
+
+New features:
+*  Add and update in-code documentation (docstrings)
+*  New options 'kdotpy doc' and 'kdotpy test' for the master script
+*  Put csv files for wave functions into an archive (tar/targz/zip)
+
+
+v0.62  (2020-01-30)
+-------------------
+Bug fixes:
+*  Correct angular units for magnetic fields in csv output
+*  Fix formatting of first column in some csv output generated with pandas
+*  Fix syntax errors and type errors on printing vectors
+*  Fix crash on failure getting band indices
+*  Fix how momentum components are handled by Hamiltonian construction functions
+
+Implementation changes:
+*  New test in tests.sh
+*  New handling of command-line arguments. At the end of a successful run, a
+warning is given with precisely the arguments that did not do anything.
+*  Stricter checks on geometric parameters (sizes and resolutions)
+
+
+v0.61  (2020-01-29)
+-------------------
+Bug fixes:
+*  Fix incorrect sign in Hamiltonian entry C, that couples Gamma8,+1/2 and
+Gamma8,-1/2 orbitals and is proportional to dz kappa.
+*  Fix incorrect terms in Hamiltonian when BIA is combined with nonzero in-plane
+magnetic field components.
+
+Implementation changes:
+*  Slight modification in how strain is implemented and saved into the XML file
+*  Improve performance of vector grid indexing, used in symmetry test and
+symmetrization
+*  Improved behaviour and new tests in tests.sh
+
+New features:
+*  Lattice and vector transformations, that accommodate the following features.
+*  Any crystal orientation (experimental)
+*  Symmetry analysis; identification of symmetry group (experimental)
+*  Off-diagonal strain components
+
+
+v0.60.1  (2020-01-27)  (hotfix)
+-------------------------------
+Bug fixes:
+*  Correct wrong type that caused a crash in contour plot
+
+
+v0.60  (2020-01-25)
+-------------------
+Bug fixes:
+*  Fix crash on malformed regular expression
+*  Add missing variable that caused crash in band alignment for magnetic-field
+dependence in bulk
+
+Implementation changes:
+*  Code cleanup and reordering
+
+Miscellaneous:
+*  In-code documentation (partially)
+
+
+v0.59  (2020-01-14)
+-------------------
+Bug fixes:
+*  Do not plot for single data point (bulk) or in absence of data (2d
+dispersion)
+*  Catch a rare problem with extrema
+
+Implementation changes:
+*  Silence some warnings in newer versions of Python and libraries
+*  Update band labels for 2D plots
+*  Smarter tick choices for angular variables
+*  Re-format some numerical output (rounding)
+
+New features:
+*  Add OS information to XML output
+
+
+v0.58  (2019-09-20)
+-------------------
+Bug fixes:
+*  Fix phase errors for nontrivially oriented strips leading to incorrect
+angular momentum orientation (Jx, Jy, Sx, and Sy)
+*  Fix several problems with BIA
+*  Fix incorrect orientation for wave function plots (|psi(z,y)|^2)
+*  For BHZ, include also non-axial terms and do not suppress off-diagonal k^2
+terms in the output
+*  Invalid observables must be encoded by zero matrix, not the value 0
+
+Implementation changes:
+*  Input of width and width resolution from command line
+*  Wave function plot (|psi(z,y)|^2) output file format
+
+New features:
+*  In-plane magnetic fields (orbital effect)
+*  Wave function plot (|psi(z,y)|^2) options
+
+
+v0.57  (2019-09-02)
+-------------------
+Bug fixes:
+*  Fix incorrect type in density range
+*  Set confinement to zero when periodic boundary conditions are used
+
+New features:
+*  Strip orientation
+*  Unit headers for dispersion csv output
+*  Output styles for observable and unit headers
+
+
+v0.56.3  (2019-08-06)
+---------------------
+Implementation changes:
+*  Rename files aux.py to auxil.py for Windows file system compatibility
+
+
+v0.56.1 and v0.56.2  (2019-08-02)  (hotfix)
+-------------------------------------------
+Bug fixes:
+*  Overflow error in table output
+*  Fix bad colorbars (matplotlib version incompatibility)
+
+
+v0.56  (2019-08-02)
+-------------------
+Bug fixes:
+*  Column order in extrema csv output
+*  Reflection of cylindrical coordinates
+*  Switch to full LL mode if strain is applied in x direction
+
+Implementation changes:
+*  Several updates to colour scales, for example manual limits
+*  Update master script
+
+New features:
+*  Read and write configuration to xml data file
+*  Dual colour scales for LL index together with spin
+
+
+v0.55  (2019-07-18)
+-------------------
+Bug fixes:
+*  Incorrect transition matrix (transposition)
+*  Error in band alignment when zero point is absent
+*  Missing column headers in 2d csv output
+
+Implementation changes:
+*  Streamline transitions, in particular output (plot and csv) and configuration
+
+New features:
+*  Energy shift (automatic centering at charge neutrality or manual shift)
+*  Absorption plots for transitions
+*  Energy-at-constant-density csv output for LL mode
+*  Show suggestions for invalid configuration options
+
+
+v0.54  (2019-07-03)
+-------------------
+Bug fixes:
+*  Incorrect band indices for low Landau levels (still) and crash in some cases
+*  Problem with vector plot
+
+
+v0.53  (2019-06-28)
+-------------------
+Bug fixes:
+*  Incorrect band indices for low Landau levels
+*  Missing band characters not stored correctly (xml output)
+*  Fix error at trying to calculate wave function in a single point
+*  Accept alias observables on command line (plots)
+
+Implementation changes:
+*  Use a different formula for occupation in transition rate
+*  Separate raw transition rate and dressed one (accounting for occupancy and
+density)
+
+New features:
+*  Interface observables
+*  Configuration options for transition plots
+
+
+v0.52  (2019-06-19)
+-------------------
+Bug fixes:
+*  Fix problems with csv output using pandas
+*  Fix incorrect passing of command line arguments by kdotpy (main script) and
+kdotpy-batch
+*  Allow bulk mode calculation without zero point
+
+Implementation changes:
+*  Turn observables into class objects
+*  Put band alignment algorithm in a more clearly defined framework. This update
+leads to better results (fewer artifacts).
+*  Apply option 'dosrange' also to 'bdependence-vs-n' plot
+
+New features:
+*  Manual band alignment
+*  Helical 'splitting' Hamiltonian
+*  Berry curvature in bulk mode
+*  Vector plots for Berry curvature
+*  Force legacy or full LL mode
+
+
+v0.51  (2019-05-13)
+-------------------
+Bug fixes:
+*  Fix non-output of csv output of observables by band
+*  Suppress confinement warning when reading data file
+
+Implementation changes:
+*  Rename output-merge.py, output-compare.py, and batch.py to kdotpy-merge.py,
+kdotpy-compare.py, and kdotpy-batch.py and make them executable
+*  Add a master script named kdotpy
+*  Make IO of files independent of working directory, so that kdotpy can be
+called from anywhere
+*  Minor updates to format of some csv output
+
+New features:
+*  Write merged csv output if desired (kdotpy-merge)
+*  Sort data before doing reconnect (kdotpy-merge)
+
+
+v0.50  (2019-05-02)
+-------------------
+Bug fixes:
+*  Re-add missing Hamiltonian function
+*  Fix duplicate column headings for pandas output
+*  Scan script directory, not working directory for version info
+
+Implementation changes:
+*  Slightly different column headers for wave function csv output
+*  Use maximum number of CPUs by default
+
+New features:
+*  Load matplotlibrc from a custom location
+
+
+v0.49  (2019-04-24)
+-------------------
+Name change to kdotpy:
+*  Change script names
+*  Relocate configuration file
+*  Add a function to automatically import old configuration file
+
+Bug fixes:
+*  Quoted table headings (csv output)
+
+
+v0.48 and v0.48.1 (2019-04-16)
+------------------------------
+Bug fixes:
+*  Fix missing table subheadings (units)
+*  Exception and signal handling in parallelized runs
+
+Implementation changes:
+*  Introduce @{variable} pattern for batch commands (with batch.py)
+
+New features:
+*  Integrated transition amplitudes (table output)
+*  Table output for wave functions (2D)
+
+
+v0.47  (2019-04-10)
+-------------------
+Python 2 to Python 3 conversion:
+*  Conversion of the full codebase to Python 3, compatibility with Python 2
+abandoned
+*  Major cleanup by rearranging code, simplifying constructs, removing unused
+dependencies, etc.
+
+Bug fixes:
+*  Several missing variables, found by automated review of the code
+*  Better handling of interrupts during parallel computation
+
+Implementation changes:
+*  Optimization of table output (csv files)
+*  Output directory handling for output-merge and output-compare
+
+
+v0.46  (2019-03-12)
+-------------------
+Bug fixes:
+*  Several updates to band alignment algorithm; fixes many 'seemingly random'
+failures of this algorithm
+*  Silence some spurious warnings
+*  Fix argument error in Berry curvature calculation (momentum space)
+*  Fix several problems with csv output
+
+Implementation changes:
+*  Postprocessing actions now in a separate source file
+*  Code cleanup: Some unused functions were removed
+*  Interpolate DOS and Berry plots for higher resolution output
+*  Distinguish between carrier density (1/nm^2) and charge density (e/nm^2)
+
+New features:
+*  Save momentum dimension to xml file; no longer save irrelevant variables
+*  Transitions for full LL mode
+*  Configurable figure properties: Size, margins, legends, ticks, axes labels
+(units), titles, and other text elements
+*  Display spins in 2D dispersion plots
+*  Background density per layer; selfconsistent calculation (experimental)
+*  DOS calculations for 1D (ribbon geometry)
+*  Separate setting for temperature used in thermal broadening (for DOS)
+
+Deleted features:
+*  External diagonalization using C++ program based on Armadillo library
+
+
+v0.45  (2019-01-30)
+-------------------
+Bug fixes:
+*  Several minor issues with band alignment algorithm
+*  Calculate appropriate quantity for transition amplitude
+*  Fix problems with incorrect scaling in density plots
+
+Implementation changes:
+*  Define red-gray-blue color scale as a proper colormap
+*  Change some default colormaps
+
+New features:
+*  Output transitions to csv file
+*  Framework for configuration file ($HOME/.hgmnte/hgmnterc)
+
+
+v0.44 and v0.44.1 (2018-12-06)
+------------------------------
+Bug fixes:
+*  Increase lower limit of number of states in LL calculation,
+decreasing the chance of obtaining garbage
+*  Catch more deviations in self-consistent Hartree
+*  Fix various bugs triggered by failing band alignment
+
+Implementation changes:
+*  Improved band alignment algorithm (band indices)
+
+New features:
+*  Print densities
+*  Set charge neutrality at a specific energy (experimental)
+
+
+v0.43  (2018-11-14)
+-------------------
+Bug fixes:
+*  Mixed up DOS and IDOS values in tables of DOS and IDOS by band
+*  Incorrect symmetry transformation (for spin) in symmetrization
+
+New features:
+*  Total in table of DOS and IDOS by band
+*  Show spin arrows in dispersion plots
+
+
+v0.42  (2018-11-13)
+-------------------
+Bug fixes:
+*  Fix an error in the nine-point extremum solver, singular case
+*  Fix crash in 2D plot by using finer contour spacing
+*  Make band indices calculation more reliable; this eliminates
+artifacts in dispersion plots
+*  Fix bug in calculation of neutral energy
+*  Seek Fermi energy in a larger range; this reduces the chance of
+failure for the self-consistent method
+
+Implementation changes:
+*  Re-implementation of density units: Distinguish between 1/nm^2,
+e/nm^2, 1/cm^2, and e/cm^2
+*  Improved local DOS plot, using interpolation of the dispersion
+*  New colour maps in some plots
+*  Band indices calculated more reliably
+
+New features:
+*  Extrema for 3D
+*  Bulk inversion asymmetry for 1D
+*  DOS separated by band
+
+
+v0.41  (2018-10-26)
+-------------------
+Bug fixes:
+*  2D dispersion plot in bulk mode, yz and xz planes
+
+Implementation changes:
+*  Some changes to bulk LL script in view of unification
+
+
+v0.40  (2018-10-19)
+-------------------
+Bug fixes:
+*  Fix an error in extrema finding combined with symmetrization
+*  Remove some spurious (interface) term from Hamiltonian
+*  Units in DOS plots
+
+Implementation changes:
+*  Unification: Merge scripts as to reduce the total number of them
+*  Symmetry test now deals properly with isolated degeneracies
+
+New features:
+*  Re-implementation of bulk-inversion asymmetry; also for 2D
+
+
+v0.39  (2018-09-28)
+-------------------
+Bug fixes:
+*  Bug on reading number of orbitals using 'norb 6' or 'norb 8'
+*  Some cosmetic improvements of plot titles
+*  Extrema output failure in combination with symmetrization
+
+Implementation changes:
+*  Plot command output-merge.py now reads parameters from XML files,
+compares the values, and raises a warning if they are different
+
+New features:
+*  Potential along y direction in 1D (very basic implementation)
+*  Calculate wave functions at multiple positions (2D)
+
+
+v0.38  (2018-09-06)
+-------------------
+Bug fixes:
+*  Issues with indexed colours
+*  Incorrectly labelled horizontal axis in B-dependence plots
+*  Error in non-axial term in Hamiltonian (1D, nonzero magnetic field)
+*  Solve an exception when k = 0 is not in the data set
+
+Implementation changes:
+*  Plot title formatting now follows Python's string format() 'language'
+
+New features:
+*  Set plot title position
+*  With output-merge.py, allow 'sticking' compatible VectorGrids together
+
+
+v0.37  (2018-08-29)
+-------------------
+Bug fixes:
+*  Incorrect representations of zero momentum in symmetrization
+*  Issues with colormaps
+*  Observables: Symmetrization of yjz, displayed expressions for sx and sy
+
+Implementation changes:
+*  In output-merge.py, be less strict about presence of VectorGrid
+*  Remove dedicated BHZ script and merge functionality into hgmnte-2d.py
+*  Basis order in BHZ calculation
+*  Abort if self-consistent calculation fails, instead of raising a warning and
+continuing
+*  For full LL calculation, lift degeneracy at B = 0 for the purpose of
+diagonalization
+
+New features:
+*  Reconnect bands in output-merge.py
+*  Accept archives (.tar.gz) and compressed (.xml.gz) data files in
+output-merge.py
+*  Set horizontal plot range
+*  IPR observables
+
+
+v0.36  (2018-08-24)
+-------------------
+Bug fixes:
+*  Indexed colors, especially affecting full LL calculation
+*  Do not require VectorGrid for plotting; this solves the problem of not being
+able to plot from older data files
+*  Fix incorrectly placed extrema in 2D
+*  Overlaps not being calculated in 1D
+*  Incorrect magnetic field axis in 1D
+*  Error in observable 'jy'
+*  Solve crash in BHZ calculation
+
+Implementation changes:
+*  Significant performance improvement (smaller RAM and CPU load) by a more 
+efficient implementation of the calculation of observables
+*  Save VectorGrid in XML data file
+
+New features:
+*  Define density and Berry (sigma_H) broadening independently
+*  Plot integrated Berry curvature (sigma_H) and its derivative as function of
+energy and density
+*  Dimensionful observables; for example, observable 'y' in length units (nm)
+*  Symmetry tests and automatic extension in momentum space by symmetrization,
+including replots using output-merge.py
+*  Alternative progress monitor (estimated time of completion)
+
+
+v0.35.1  (2018-08-01)  (hotfix)
+-------------------------------
+Bug fix:
+*  Fix error in Hartree 1D when python != python2
+
+
+v0.35  (2018-08-01)
+-------------------
+Implementation changes:
+*  Show units in csv output files
+
+New features:
+*  Full LL calculation, including non-perpendicular magnetic field
+*  Input momentum and magnetic field in angular representation in terms of
+cartesian vector components
+*  Output DOS in csv format
+*  Apply Hartree result from 2D to 1D
+
+
+v0.34  (2018-07-24)
+-------------------
+Bug fixes:
+*  Band indexing in bulk mode
+
+New features:
+*  Per-band 2D output
+*  Density of states, self-consistent Hartree, etc. for Landau levels
+*  Berry curvature in magnetic field
+*  Quadratic stepping for ranges (0 1 4 9 ...)
+*  Transition amplitudes
+
+Implementation changes:
+*  Berry curvature
+*  Vector quantities (momentum, magnetic field) now defined as new class
+*  Add and rename some observables (proper and total spin)
+*  Improved algorithm for density of states (fixes discretization artifacts)
+*  More stable self-consistent Hartree
+
+Deleted features:
+*  Momentum combination 'k kperp kphi'
+
+
+v0.33  (2018-05-23)
+-------------------
+Implementation changes:
+*  Make band connection algorithm more robust
+*  Relabelling of bands in bulk mode
+*  Different values for HgMnTe band gap (as function of Mn concentration)
+
+New features:
+*  Batch processing (may not yet be accessible from biscuit)
+*  Separate csv data files for subbands in 2D dispersion
+*  Contour plots for bulk
+
+
+v0.32  (2018-04-19)
+-------------------
+Bug fixes:
+*  Handle missing colormap (naming incompatibility between matplotlib versions).
+*  Misnamed variable in wave function plot.
+
+New features:
+*  Connecting data points over 2D grid
+*  Contour plots of band energies
+*  Berry curvature as observable
+
+
+v0.31 (2018-04-09)
+------------------
+Implementation changes:
+*  Adapt internal data structure for passing around diagonalization data.
+The code has become cleaner and provides a more extensible framework.
+
+New features:
+*  Nicer plots. The algorithm connecting data points is now much more reliable. 
+One can also plot coloured curves now.
+*  Bulk Landau level calculation (sample with infinite z extent).
+
+Deleted components:
+*  Algorithm to determine and apply a shift on the E1 band. This function has 
+turned out to be unnecessary, and using it probably leads to more questions than 
+answers.
+
+
+v0.30 (2018-03-19)
+------------------
+Bug fixes:
+*  Fix undefined variable bug in hgmnte-2d and hgmnte-ll.
+*  Reset magnetic field after calculation overlap at B = 0.
+*  Adapt several error/warning messages.
+
+New features:
+*  Plot wave functions as function of y; separate by orbital or subband
+components.
+
+
+v0.29 (2018-03-12)
+------------------
+Bug fixes:
+*  Incorrect factor in LL Hamiltonian.
+*  Several spurious warnings have been silenced.
+
+New features:
+*  Print band minima and maxima (local or global extrema)
+*  In bulk, probe momenta along a Brillouin zone direction [klm]
+*  Bulk inversion asymmetry (bulk only)
+*  Renormalization of six-orbital model, optionally disable
+   (NOTE: Issues may arise at the interfaces; these will be addressed in a 
+   future version.)
+*  Vacuum as a (pseudo)material
+*  Extend options of 'subband colouring'
+
+
+v0.28 (2018-02-12)
+------------------
+Implementation changes:
+*  'Separate' wave function plots are no longer separate files, but separate 
+pages inside a single PDF file.
+
+New features:
+*  Eight-orbital Kane model. The six-orbital model can still be chosen 
+optionally. In order to avoid confusion about which model is used, it is 
+required to choose either one explicitly.
+*  Updates to Hartree calculation. One can also enter the top and bottom 
+electric field as input parameter.
+*  Calculation of Berry curvature.
+*  Axially non-symmetric terms (except for Landau-level calculation).
+*  Overlap of the wave functions at nonzero momentum with the lowest subband 
+wave functions at zero momentum.
+
+
+v0.27  (2018-01-15)
+-------------------
+Bug fixes:
+*  Absent observables causing crash in plot functions
+
+Implementation changes:
+*  The file names 'table' and 'plot' have been renamed to 'dispersion' or 
+'bdependence', whichever applies.
+
+New features:
+*  Self-consistent Hartree calculation. The current implementation is 
+sufficiently stable and converges in most sensible situations. The algorithm is 
+implemented only in the 2D geometry without magnetic field, at this moment.
+*  Plots of density of states, integrated density of states, and charge density
+as function of z.
+
+
+v0.26  (2017-12-13)
+-------------------
+Bug fixes:
+*  Erroneous warning on band character in wave function plots.
+*  Some messages were still erroneously written to stdout (partially).
+*  Some slight adjustments to material parameters.
+
+Implementation changes:
+*  The material and layer properties in the XML output have changed, following 
+the new material and layer handling (see below).
+
+New features:
+*  New material handling. Now, it should be easier to define the properties of 
+more materials. The implementation decouples the notion of material and layer 
+completely, which allows also the following new features.
+*  Layer structure. Now, it is possible to define an arbitrary 'layer stack', 
+with any number of layers, each with independent material (composition) and 
+thickness.
+*  Plot and/or write material parameters as function of z; related to the new 
+'layer stack' implementation.
+
+
+v0.25  (2017-11-23)
+-------------------
+Bug fixes:
+*  Fix crash (on SyntaxError) of the magnetic-field dependence (and LL) scripts.
+*  Some warnings have been adapted
+
+New features:
+*  Bulk calculations
+*  Enable BHZ plots away from kx axis
+*  Raise warnings for meaningless command-line arguments
+
+
+v0.24  (2017-11-09) 
+-------------------
+First publicly available version on server
+
+
+Note
+====
+Until 2019-04-24 (prior to v0.49) known as 'hgmnte-tb' or 'hgmnte'
+
+
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-200.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-200.png
new file mode 100644
index 0000000000000000000000000000000000000000..73aea4ab63ee8c0ec69696e0efd4550fc98f5649
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-200.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-32.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-32.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea02709f7ab09193d84b060faf133ba019cc445e
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-32.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-64.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-64.png
new file mode 100644
index 0000000000000000000000000000000000000000..6608936e2ef61159508f31c9e542d686a8f66ba8
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-64.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black-200.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black-200.png
new file mode 100644
index 0000000000000000000000000000000000000000..f16a335171415f5c3d7fea56d8d0e768416cae53
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black-200.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black.svg b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black.svg
new file mode 100644
index 0000000000000000000000000000000000000000..429f36e19a47358cc080328c41584b149d997a9f
--- /dev/null
+++ b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-black.svg
@@ -0,0 +1,108 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="60mm"
+   height="60mm"
+   viewBox="0 0 60 60"
+   version="1.1"
+   id="svg8"
+   inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
+   sodipodi:docname="kdotpy-logo-black.svg"
+   inkscape:export-filename="/home/wbeugeling/Documents/kdotpy-logo-wb-64.png"
+   inkscape:export-xdpi="27.093334"
+   inkscape:export-ydpi="27.093334"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:dc="http://purl.org/dc/elements/1.1/">
+  <defs
+     id="defs2" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.4"
+     inkscape:cx="56.785714"
+     inkscape:cy="80.357143"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     inkscape:document-rotation="0"
+     showgrid="true"
+     inkscape:pagecheckerboard="false"
+     inkscape:snap-bbox="true"
+     inkscape:snap-page="true"
+     inkscape:snap-bbox-midpoints="true"
+     inkscape:window-width="1920"
+     inkscape:window-height="1132"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:snap-text-baseline="true"
+     inkscape:snap-intersection-paths="false"
+     inkscape:snap-smooth-nodes="true">
+    <inkscape:grid
+       type="xygrid"
+       id="grid841"
+       units="mm"
+       spacingx="1.9999999"
+       spacingy="1.9999999"
+       originx="0"
+       originy="0" />
+    <inkscape:grid
+       type="xygrid"
+       id="grid1051"
+       units="mm"
+       spacingx="0.5"
+       spacingy="0.5"
+       color="#ff3f3f"
+       opacity="0.1254902"
+       empcolor="#ff3f3f"
+       empopacity="0.25098039"
+       empspacing="4" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <g
+       aria-label="k py"
+       id="text837"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583">
+      <path
+         d="m 18.623799,35.4412 c 0,0 0,-0.2286 -0.254,-0.5588 l -4.4196,-5.7912 3.9116,-3.6322 c 0.3048,-0.2794 0.381,-0.3556 0.381,-0.5588 0,-0.5334 -0.508,-0.5334 -0.8636,-0.5334 h -1.4732 c -0.4572,0 -0.9398,0 -1.524,0.5334 l -3.6576,3.4036 v -8.7122 c 0,-0.8128 -0.1778,-1.2192 -1.2191995,-1.2192 h -0.5334 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 15.1892 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 0.4572 c 0.9651995,0 1.2191995,-0.3302 1.2191995,-1.2192 v -2.6416 c 0.4064,-0.3556 0.7874,-0.7366 1.1938,-1.0922 l 3.302,4.318 c 0.4318,0.5588 0.6096,0.635 1.3462,0.635 h 1.2192 c 0.3556,0 0.9144,0 0.9144,-0.5588 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#000000;fill-opacity:1;stroke-width:0.264583"
+         id="path865" />
+      <path
+         d="m 42.09332,30.1326 c 0,-1.2446 0,-6.0452 -4.5974,-6.0452 -1.4986,0 -2.8448,0.6096 -3.7592,1.4732 0,-0.9398 -0.3556,-1.1938 -1.2192,-1.1938 h -0.9398 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 14.1224 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 1.016 c 0.9652,0 1.2192,-0.3302 1.2192,-1.2192 v -4.699 c 1.143,1.27 2.54,1.27 2.8956,1.27 5.3848,0 5.3848,-4.8768 5.3848,-6.1468 z m -3.4544,0.0508 c 0,1.016 0,4.2672 -2.8702,4.2672 -0.5842,0 -1.0414,-0.2032 -1.3462,-0.4318 -0.6096,-0.4318 -0.6096,-0.5588 -0.6096,-1.016 v -6.096 c 0.2794,-0.254 1.1176,-0.8382 2.2098,-0.8382 2.5908,0 2.6162,2.9972 2.6162,4.1148 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#000000;fill-opacity:1;stroke-width:0.264583"
+         id="path867" />
+      <path
+         d="m 54.285287,25.0526 c 0,-0.6858 -0.6858,-0.6858 -1.016,-0.6858 h -0.5588 c -0.4064,0 -0.9652,0 -1.3462,0.5842 -0.0762,0.127 -2.1336,5.7404 -2.2606,7.4422 h -0.0254 c -0.127,-1.2446 -1.1684,-3.6068 -2.2606,-6.0452 -0.7112,-1.6002 -0.889,-1.9812 -2.159,-1.9812 h -0.6604 c -0.3556,0 -0.9906,0 -0.9906,0.6604 0,0.1016 0.0762,0.2794 0.127,0.4064 L 47.935287,36 c -0.2032,0.5334 -0.2794,0.8128 -0.3556,1.0668 -0.3048,0.8382 -0.7366,1.9812 -2.1082,1.9812 -0.8382,0 -1.4224,-0.3556 -1.7018,-0.5334 -0.127,-0.0762 -0.1778,-0.1016 -0.254,-0.1016 -0.127,0 -0.3302,0.0762 -0.3302,0.381 0,0.2032 0.1524,1.9558 0.2286,2.0574 0.2286,0.3048 1.6002,0.3556 2.0066,0.3556 3.1496,0 4.3942,-2.9972 4.5974,-3.6068 l 4.1402,-12.0396 c 0.127,-0.3302 0.127,-0.381 0.127,-0.508 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#000000;fill-opacity:1;stroke-width:0.264583"
+         id="path869" />
+    </g>
+    <ellipse
+       style="opacity:1;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path839"
+       cy="29.999998"
+       cx="24"
+       rx="2.5"
+       ry="2.4999981" />
+  </g>
+</svg>
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled-200.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled-200.png
new file mode 100644
index 0000000000000000000000000000000000000000..b8be41389f418c475f4d3eef867ecb7574d8313f
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled-200.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled.svg b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled.svg
new file mode 100644
index 0000000000000000000000000000000000000000..0aae2c0b0cb76988d82b92f34b74a7780f315e0a
--- /dev/null
+++ b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-circled.svg
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="60mm"
+   height="60mm"
+   viewBox="0 0 60 60"
+   version="1.1"
+   id="svg8"
+   inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
+   sodipodi:docname="kdotpy-logo-circled.svg"
+   inkscape:export-filename="/home/wbeugeling/Documents/kdotpy-logo-wb-64.png"
+   inkscape:export-xdpi="27.093334"
+   inkscape:export-ydpi="27.093334"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:dc="http://purl.org/dc/elements/1.1/">
+  <defs
+     id="defs2" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="2.0399495"
+     inkscape:cx="47.305093"
+     inkscape:cy="44.363843"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     inkscape:document-rotation="0"
+     showgrid="true"
+     inkscape:pagecheckerboard="false"
+     inkscape:snap-bbox="true"
+     inkscape:snap-page="true"
+     inkscape:snap-bbox-midpoints="true"
+     inkscape:window-width="1920"
+     inkscape:window-height="1132"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:snap-text-baseline="true"
+     inkscape:snap-intersection-paths="false"
+     inkscape:snap-smooth-nodes="true">
+    <inkscape:grid
+       type="xygrid"
+       id="grid841"
+       units="mm"
+       spacingx="1.9999999"
+       spacingy="1.9999999"
+       originx="0"
+       originy="0" />
+    <inkscape:grid
+       type="xygrid"
+       id="grid1051"
+       units="mm"
+       spacingx="0.5"
+       spacingy="0.5"
+       color="#ff3f3f"
+       opacity="0.1254902"
+       empcolor="#ff3f3f"
+       empopacity="0.25098039"
+       empspacing="4" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <ellipse
+       style="opacity:1;fill:#010000;fill-opacity:1;stroke:none;stroke-width:0.41214;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path1432"
+       cx="30"
+       cy="29.999998"
+       rx="27"
+       ry="27.000002" />
+    <g
+       aria-label="k py"
+       id="text837"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583">
+      <path
+         d="m 18.623799,35.4412 c 0,0 0,-0.2286 -0.254,-0.5588 l -4.4196,-5.7912 3.9116,-3.6322 c 0.3048,-0.2794 0.381,-0.3556 0.381,-0.5588 0,-0.5334 -0.508,-0.5334 -0.8636,-0.5334 h -1.4732 c -0.4572,0 -0.9398,0 -1.524,0.5334 l -3.6576,3.4036 v -8.7122 c 0,-0.8128 -0.1778,-1.2192 -1.2191995,-1.2192 h -0.5334 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 15.1892 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 0.4572 c 0.9651995,0 1.2191995,-0.3302 1.2191995,-1.2192 v -2.6416 c 0.4064,-0.3556 0.7874,-0.7366 1.1938,-1.0922 l 3.302,4.318 c 0.4318,0.5588 0.6096,0.635 1.3462,0.635 h 1.2192 c 0.3556,0 0.9144,0 0.9144,-0.5588 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path865" />
+      <path
+         d="m 42.09332,30.1326 c 0,-1.2446 0,-6.0452 -4.5974,-6.0452 -1.4986,0 -2.8448,0.6096 -3.7592,1.4732 0,-0.9398 -0.3556,-1.1938 -1.2192,-1.1938 h -0.9398 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 14.1224 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 1.016 c 0.9652,0 1.2192,-0.3302 1.2192,-1.2192 v -4.699 c 1.143,1.27 2.54,1.27 2.8956,1.27 5.3848,0 5.3848,-4.8768 5.3848,-6.1468 z m -3.4544,0.0508 c 0,1.016 0,4.2672 -2.8702,4.2672 -0.5842,0 -1.0414,-0.2032 -1.3462,-0.4318 -0.6096,-0.4318 -0.6096,-0.5588 -0.6096,-1.016 v -6.096 c 0.2794,-0.254 1.1176,-0.8382 2.2098,-0.8382 2.5908,0 2.6162,2.9972 2.6162,4.1148 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path867" />
+      <path
+         d="m 54.285287,25.0526 c 0,-0.6858 -0.6858,-0.6858 -1.016,-0.6858 h -0.5588 c -0.4064,0 -0.9652,0 -1.3462,0.5842 -0.0762,0.127 -2.1336,5.7404 -2.2606,7.4422 h -0.0254 c -0.127,-1.2446 -1.1684,-3.6068 -2.2606,-6.0452 -0.7112,-1.6002 -0.889,-1.9812 -2.159,-1.9812 h -0.6604 c -0.3556,0 -0.9906,0 -0.9906,0.6604 0,0.1016 0.0762,0.2794 0.127,0.4064 L 47.935287,36 c -0.2032,0.5334 -0.2794,0.8128 -0.3556,1.0668 -0.3048,0.8382 -0.7366,1.9812 -2.1082,1.9812 -0.8382,0 -1.4224,-0.3556 -1.7018,-0.5334 -0.127,-0.0762 -0.1778,-0.1016 -0.254,-0.1016 -0.127,0 -0.3302,0.0762 -0.3302,0.381 0,0.2032 0.1524,1.9558 0.2286,2.0574 0.2286,0.3048 1.6002,0.3556 2.0066,0.3556 3.1496,0 4.3942,-2.9972 4.5974,-3.6068 l 4.1402,-12.0396 c 0.127,-0.3302 0.127,-0.381 0.127,-0.508 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path869" />
+    </g>
+    <ellipse
+       style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path839"
+       cy="29.999998"
+       cx="24"
+       rx="2.5"
+       ry="2.4999981" />
+  </g>
+</svg>
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared-200.png b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared-200.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0c7de19594caa0b06629aa15b0992f6845b8fb7
Binary files /dev/null and b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared-200.png differ
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared.svg b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared.svg
new file mode 100644
index 0000000000000000000000000000000000000000..a9ac3cc50669fc6f39b7185680139c9d8050d27f
--- /dev/null
+++ b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-squared.svg
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   width="60mm"
+   height="60mm"
+   viewBox="0 0 60 60"
+   version="1.1"
+   id="svg8"
+   inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
+   sodipodi:docname="kdotpy-loo.svg"
+   inkscape:export-filename="/home/wbeugeling/Documents/kdotpy-logo-wb-64.png"
+   inkscape:export-xdpi="27.093334"
+   inkscape:export-ydpi="27.093334"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:dc="http://purl.org/dc/elements/1.1/">
+  <defs
+     id="defs2" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.9531683"
+     inkscape:cx="7.868495"
+     inkscape:cy="4.721097"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     inkscape:document-rotation="0"
+     showgrid="true"
+     inkscape:pagecheckerboard="false"
+     inkscape:snap-bbox="true"
+     inkscape:snap-page="true"
+     inkscape:snap-bbox-midpoints="true"
+     inkscape:window-width="1920"
+     inkscape:window-height="1132"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:snap-text-baseline="true"
+     inkscape:snap-intersection-paths="false"
+     inkscape:snap-smooth-nodes="true">
+    <inkscape:grid
+       type="xygrid"
+       id="grid841"
+       units="mm"
+       spacingx="1.9999999"
+       spacingy="1.9999999"
+       originx="0"
+       originy="0" />
+    <inkscape:grid
+       type="xygrid"
+       id="grid1051"
+       units="mm"
+       spacingx="0.5"
+       spacingy="0.5"
+       color="#ff3f3f"
+       opacity="0.1254902"
+       empcolor="#ff3f3f"
+       empopacity="0.25098039"
+       empspacing="4" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <rect
+       style="opacity:1;fill:#010000;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect2922"
+       width="54"
+       height="54"
+       x="3"
+       y="3"
+       ry="6.9999995" />
+    <g
+       aria-label="k py"
+       id="text837"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583">
+      <path
+         d="m 18.623799,35.4412 c 0,0 0,-0.2286 -0.254,-0.5588 l -4.4196,-5.7912 3.9116,-3.6322 c 0.3048,-0.2794 0.381,-0.3556 0.381,-0.5588 0,-0.5334 -0.508,-0.5334 -0.8636,-0.5334 h -1.4732 c -0.4572,0 -0.9398,0 -1.524,0.5334 l -3.6576,3.4036 v -8.7122 c 0,-0.8128 -0.1778,-1.2192 -1.2191995,-1.2192 h -0.5334 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 15.1892 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 0.4572 c 0.9651995,0 1.2191995,-0.3302 1.2191995,-1.2192 v -2.6416 c 0.4064,-0.3556 0.7874,-0.7366 1.1938,-1.0922 l 3.302,4.318 c 0.4318,0.5588 0.6096,0.635 1.3462,0.635 h 1.2192 c 0.3556,0 0.9144,0 0.9144,-0.5588 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path865" />
+      <path
+         d="m 42.09332,30.1326 c 0,-1.2446 0,-6.0452 -4.5974,-6.0452 -1.4986,0 -2.8448,0.6096 -3.7592,1.4732 0,-0.9398 -0.3556,-1.1938 -1.2192,-1.1938 h -0.9398 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 14.1224 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 1.016 c 0.9652,0 1.2192,-0.3302 1.2192,-1.2192 v -4.699 c 1.143,1.27 2.54,1.27 2.8956,1.27 5.3848,0 5.3848,-4.8768 5.3848,-6.1468 z m -3.4544,0.0508 c 0,1.016 0,4.2672 -2.8702,4.2672 -0.5842,0 -1.0414,-0.2032 -1.3462,-0.4318 -0.6096,-0.4318 -0.6096,-0.5588 -0.6096,-1.016 v -6.096 c 0.2794,-0.254 1.1176,-0.8382 2.2098,-0.8382 2.5908,0 2.6162,2.9972 2.6162,4.1148 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path867" />
+      <path
+         d="m 54.285287,25.0526 c 0,-0.6858 -0.6858,-0.6858 -1.016,-0.6858 h -0.5588 c -0.4064,0 -0.9652,0 -1.3462,0.5842 -0.0762,0.127 -2.1336,5.7404 -2.2606,7.4422 h -0.0254 c -0.127,-1.2446 -1.1684,-3.6068 -2.2606,-6.0452 -0.7112,-1.6002 -0.889,-1.9812 -2.159,-1.9812 h -0.6604 c -0.3556,0 -0.9906,0 -0.9906,0.6604 0,0.1016 0.0762,0.2794 0.127,0.4064 L 47.935287,36 c -0.2032,0.5334 -0.2794,0.8128 -0.3556,1.0668 -0.3048,0.8382 -0.7366,1.9812 -2.1082,1.9812 -0.8382,0 -1.4224,-0.3556 -1.7018,-0.5334 -0.127,-0.0762 -0.1778,-0.1016 -0.254,-0.1016 -0.127,0 -0.3302,0.0762 -0.3302,0.381 0,0.2032 0.1524,1.9558 0.2286,2.0574 0.2286,0.3048 1.6002,0.3556 2.0066,0.3556 3.1496,0 4.3942,-2.9972 4.5974,-3.6068 l 4.1402,-12.0396 c 0.127,-0.3302 0.127,-0.381 0.127,-0.508 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path869" />
+    </g>
+    <ellipse
+       style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path839"
+       cy="29.999998"
+       cx="24"
+       rx="2.5"
+       ry="2.4999981" />
+  </g>
+</svg>
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo-text-to-path.svg b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-text-to-path.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2ad013dc3d752edf3bf94085d70e32feb38a856d
--- /dev/null
+++ b/kdotpy-v1.0.0/docs/logo/kdotpy-logo-text-to-path.svg
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="60mm"
+   height="60mm"
+   viewBox="0 0 60 60"
+   version="1.1"
+   id="svg8"
+   inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
+   sodipodi:docname="kdotpy-logo-text-to-path.svg"
+   inkscape:export-filename="/home/wbeugeling/Documents/kdotpy-logo-wb-64.png"
+   inkscape:export-xdpi="27.093334"
+   inkscape:export-ydpi="27.093334">
+  <defs
+     id="defs2">
+    <linearGradient
+       inkscape:collect="always"
+       id="linearGradient970">
+      <stop
+         style="stop-color:#d1e3fa;stop-opacity:1"
+         offset="0"
+         id="stop966" />
+      <stop
+         style="stop-color:#76adf0;stop-opacity:1"
+         offset="1"
+         id="stop968" />
+    </linearGradient>
+    <linearGradient
+       inkscape:collect="always"
+       id="linearGradient954">
+      <stop
+         style="stop-color:#4692ea;stop-opacity:1;"
+         offset="0"
+         id="stop950" />
+      <stop
+         style="stop-color:#a5c9f4;stop-opacity:1"
+         offset="1"
+         id="stop952" />
+    </linearGradient>
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient970"
+       id="linearGradient920"
+       x1="45.999996"
+       y1="7.9999995"
+       x2="13.999999"
+       y2="3.9999998"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient970"
+       id="linearGradient928"
+       x1="49.999996"
+       y1="55.999996"
+       x2="9.999999"
+       y2="51.999996"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient954"
+       id="linearGradient956"
+       x1="0"
+       y1="29.999998"
+       x2="59.999996"
+       y2="29.999998"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient954"
+       id="linearGradient1073"
+       x1="56"
+       y1="56"
+       x2="4"
+       y2="4"
+       gradientUnits="userSpaceOnUse" />
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.4"
+     inkscape:cx="56.727563"
+     inkscape:cy="80.682791"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     inkscape:document-rotation="0"
+     showgrid="true"
+     inkscape:pagecheckerboard="false"
+     inkscape:snap-bbox="true"
+     inkscape:snap-page="true"
+     inkscape:snap-bbox-midpoints="true"
+     inkscape:window-width="1920"
+     inkscape:window-height="1021"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:snap-text-baseline="true"
+     inkscape:snap-intersection-paths="false"
+     inkscape:snap-smooth-nodes="true">
+    <inkscape:grid
+       type="xygrid"
+       id="grid841"
+       units="mm"
+       spacingx="1.9999999"
+       spacingy="1.9999999"
+       originx="0"
+       originy="0" />
+    <inkscape:grid
+       type="xygrid"
+       id="grid1051"
+       units="mm"
+       spacingx="0.5"
+       spacingy="0.5"
+       color="#ff3f3f"
+       opacity="0.1254902"
+       empcolor="#ff3f3f"
+       empopacity="0.25098039"
+       empspacing="4" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <rect
+       style="opacity:1;fill:url(#linearGradient956);fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect833"
+       width="59.999996"
+       height="59.999996"
+       x="0"
+       y="0" />
+    <g
+       aria-label="k py"
+       id="text837"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583">
+      <path
+         d="m 18.623799,35.4412 c 0,0 0,-0.2286 -0.254,-0.5588 l -4.4196,-5.7912 3.9116,-3.6322 c 0.3048,-0.2794 0.381,-0.3556 0.381,-0.5588 0,-0.5334 -0.508,-0.5334 -0.8636,-0.5334 h -1.4732 c -0.4572,0 -0.9398,0 -1.524,0.5334 l -3.6576,3.4036 v -8.7122 c 0,-0.8128 -0.1778,-1.2192 -1.2191995,-1.2192 h -0.5334 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 15.1892 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 0.4572 c 0.9651995,0 1.2191995,-0.3302 1.2191995,-1.2192 v -2.6416 c 0.4064,-0.3556 0.7874,-0.7366 1.1938,-1.0922 l 3.302,4.318 c 0.4318,0.5588 0.6096,0.635 1.3462,0.635 h 1.2192 c 0.3556,0 0.9144,0 0.9144,-0.5588 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path865" />
+      <path
+         d="m 42.09332,30.1326 c 0,-1.2446 0,-6.0452 -4.5974,-6.0452 -1.4986,0 -2.8448,0.6096 -3.7592,1.4732 0,-0.9398 -0.3556,-1.1938 -1.2192,-1.1938 h -0.9398 c -0.9144,0 -1.2192,0.2794 -1.2192,1.2192 v 14.1224 c 0,0.9144 0.2794,1.2192 1.2192,1.2192 h 1.016 c 0.9652,0 1.2192,-0.3302 1.2192,-1.2192 v -4.699 c 1.143,1.27 2.54,1.27 2.8956,1.27 5.3848,0 5.3848,-4.8768 5.3848,-6.1468 z m -3.4544,0.0508 c 0,1.016 0,4.2672 -2.8702,4.2672 -0.5842,0 -1.0414,-0.2032 -1.3462,-0.4318 -0.6096,-0.4318 -0.6096,-0.5588 -0.6096,-1.016 v -6.096 c 0.2794,-0.254 1.1176,-0.8382 2.2098,-0.8382 2.5908,0 2.6162,2.9972 2.6162,4.1148 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path867" />
+      <path
+         d="m 54.285287,25.0526 c 0,-0.6858 -0.6858,-0.6858 -1.016,-0.6858 h -0.5588 c -0.4064,0 -0.9652,0 -1.3462,0.5842 -0.0762,0.127 -2.1336,5.7404 -2.2606,7.4422 h -0.0254 c -0.127,-1.2446 -1.1684,-3.6068 -2.2606,-6.0452 -0.7112,-1.6002 -0.889,-1.9812 -2.159,-1.9812 h -0.6604 c -0.3556,0 -0.9906,0 -0.9906,0.6604 0,0.1016 0.0762,0.2794 0.127,0.4064 L 47.935287,36 c -0.2032,0.5334 -0.2794,0.8128 -0.3556,1.0668 -0.3048,0.8382 -0.7366,1.9812 -2.1082,1.9812 -0.8382,0 -1.4224,-0.3556 -1.7018,-0.5334 -0.127,-0.0762 -0.1778,-0.1016 -0.254,-0.1016 -0.127,0 -0.3302,0.0762 -0.3302,0.381 0,0.2032 0.1524,1.9558 0.2286,2.0574 0.2286,0.3048 1.6002,0.3556 2.0066,0.3556 3.1496,0 4.3942,-2.9972 4.5974,-3.6068 l 4.1402,-12.0396 c 0.127,-0.3302 0.127,-0.381 0.127,-0.508 z"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583"
+         id="path869" />
+    </g>
+    <ellipse
+       style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path839"
+       cy="29.999998"
+       cx="24"
+       rx="2.5"
+       ry="2.4999981" />
+    <text
+       xml:space="preserve"
+       style="font-size:3.52777px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;stroke-width:0.264583"
+       x="15.999999"
+       y="15.999999"
+       id="text838"><tspan
+         sodipodi:role="line"
+         id="tspan836"
+         x="15.999999"
+         y="15.999999"
+         style="stroke-width:0.264583" /></text>
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="23.999998"
+       y="31.999998"
+       id="text837-4"><tspan
+         sodipodi:role="line"
+         id="tspan835-4"
+         x="23.999998"
+         y="31.999998"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:8.46667px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583" /></text>
+    <path
+       style="fill:url(#linearGradient920);fill-opacity:1;stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 3.9999998,0 c 7.9999992,9.9999995 15.9999992,15.999999 25.9999982,15.999999 9.999998,0 18,-5.9999995 25.999999,-15.999999"
+       id="path864"
+       sodipodi:nodetypes="csc" />
+    <path
+       style="fill:url(#linearGradient928);stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;fill-opacity:1"
+       d="M 59.999996,59.999996 C 53.999997,51.999997 47.999999,45.999998 41.999998,45.999998 c -5.999998,0 -8,3.999999 -12,3.999999 -4,0 -6,-3.999999 -11.999999,-3.999999 -6,0 -11.9999993,5.999999 -17.999999,13.999998 l 59.999997,10e-7"
+       id="path866"
+       sodipodi:nodetypes="csssc" />
+    <rect
+       style="fill:#155db7;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-dasharray:1.05833, 0.52916700000000005;stroke-opacity:1"
+       id="rect888"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="0" />
+    <rect
+       style="fill:#4692ea;fill-opacity:1;stroke:none;stroke-width:0.265;stroke-dasharray:1.06, 0.53000000000000003;stroke-opacity:1;stroke-miterlimit:4;stroke-dashoffset:0"
+       id="rect890"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="9.999999" />
+    <rect
+       style="fill:#a4c8f4;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-dasharray:1.05833, 0.52916700000000005;stroke-opacity:1"
+       id="rect888-4"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="19.999998" />
+    <rect
+       style="fill:#d1e3fa;fill-opacity:1;stroke:none;stroke-width:0.264999;stroke-miterlimit:4;stroke-dasharray:1.06, 0.53000000000000003;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect912"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="29.999998" />
+    <rect
+       style="fill:#4b8ad3;fill-opacity:0.0235294;stroke:url(#linearGradient1073);stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect1065"
+       width="59"
+       height="59"
+       x="0.5"
+       y="0.5"
+       ry="0" />
+  </g>
+</svg>
diff --git a/kdotpy-v1.0.0/docs/logo/kdotpy-logo.svg b/kdotpy-v1.0.0/docs/logo/kdotpy-logo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2f7c6999a390bb4d5224a27b78ec52f4e0ca8af5
--- /dev/null
+++ b/kdotpy-v1.0.0/docs/logo/kdotpy-logo.svg
@@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="60mm"
+   height="60mm"
+   viewBox="0 0 60 60"
+   version="1.1"
+   id="svg8"
+   inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
+   sodipodi:docname="kdotpy-logo.svg"
+   inkscape:export-filename="/home/wbeugeling/Documents/kdotpy-logo-wb-64.png"
+   inkscape:export-xdpi="27.093334"
+   inkscape:export-ydpi="27.093334">
+  <defs
+     id="defs2">
+    <linearGradient
+       inkscape:collect="always"
+       id="linearGradient970">
+      <stop
+         style="stop-color:#d1e3fa;stop-opacity:1"
+         offset="0"
+         id="stop966" />
+      <stop
+         style="stop-color:#76adf0;stop-opacity:1"
+         offset="1"
+         id="stop968" />
+    </linearGradient>
+    <linearGradient
+       inkscape:collect="always"
+       id="linearGradient954">
+      <stop
+         style="stop-color:#4692ea;stop-opacity:1;"
+         offset="0"
+         id="stop950" />
+      <stop
+         style="stop-color:#a5c9f4;stop-opacity:1"
+         offset="1"
+         id="stop952" />
+    </linearGradient>
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient970"
+       id="linearGradient920"
+       x1="45.999996"
+       y1="7.9999995"
+       x2="13.999999"
+       y2="3.9999998"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient970"
+       id="linearGradient928"
+       x1="49.999996"
+       y1="55.999996"
+       x2="9.999999"
+       y2="51.999996"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient954"
+       id="linearGradient956"
+       x1="0"
+       y1="29.999998"
+       x2="59.999996"
+       y2="29.999998"
+       gradientUnits="userSpaceOnUse" />
+    <linearGradient
+       inkscape:collect="always"
+       xlink:href="#linearGradient954"
+       id="linearGradient1073"
+       x1="56"
+       y1="56"
+       x2="4"
+       y2="4"
+       gradientUnits="userSpaceOnUse" />
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.979899"
+     inkscape:cx="-33.68391"
+     inkscape:cy="100.30693"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     inkscape:document-rotation="0"
+     showgrid="true"
+     inkscape:pagecheckerboard="false"
+     inkscape:snap-bbox="true"
+     inkscape:snap-page="true"
+     inkscape:snap-bbox-midpoints="true"
+     inkscape:window-width="1920"
+     inkscape:window-height="1021"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:snap-text-baseline="true"
+     inkscape:snap-intersection-paths="false"
+     inkscape:snap-smooth-nodes="true">
+    <inkscape:grid
+       type="xygrid"
+       id="grid841"
+       units="mm"
+       spacingx="1.9999999"
+       spacingy="1.9999999"
+       originx="0"
+       originy="0" />
+    <inkscape:grid
+       type="xygrid"
+       id="grid1051"
+       units="mm"
+       spacingx="0.5"
+       spacingy="0.5"
+       color="#ff3f3f"
+       opacity="0.1254902"
+       empcolor="#ff3f3f"
+       empopacity="0.25098039"
+       empspacing="4" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <rect
+       style="opacity:1;fill:url(#linearGradient956);fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect833"
+       width="59.999996"
+       height="59.999996"
+       x="0"
+       y="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="5.9999995"
+       y="36"
+       id="text837"><tspan
+         sodipodi:role="line"
+         id="tspan835"
+         x="5.9999995"
+         y="36"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:25.4px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583">k py</tspan></text>
+    <ellipse
+       style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.396875;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="path839"
+       cy="29.999998"
+       cx="24"
+       rx="2.5"
+       ry="2.4999981" />
+    <text
+       xml:space="preserve"
+       style="font-size:3.52777px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;stroke-width:0.264583"
+       x="15.999999"
+       y="15.999999"
+       id="text838"><tspan
+         sodipodi:role="line"
+         id="tspan836"
+         x="15.999999"
+         y="15.999999"
+         style="stroke-width:0.264583" /></text>
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:16.9333px;line-height:1.25;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="23.999998"
+       y="31.999998"
+       id="text837-4"><tspan
+         sodipodi:role="line"
+         id="tspan835-4"
+         x="23.999998"
+         y="31.999998"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:8.46667px;font-family:'Latin Modern Sans';-inkscape-font-specification:'Latin Modern Sans, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke-width:0.264583" /></text>
+    <path
+       style="fill:url(#linearGradient920);fill-opacity:1;stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 3.9999998,0 c 7.9999992,9.9999995 15.9999992,15.999999 25.9999982,15.999999 9.999998,0 18,-5.9999995 25.999999,-15.999999"
+       id="path864"
+       sodipodi:nodetypes="csc" />
+    <path
+       style="fill:url(#linearGradient928);stroke:none;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;fill-opacity:1"
+       d="M 59.999996,59.999996 C 53.999997,51.999997 47.999999,45.999998 41.999998,45.999998 c -5.999998,0 -8,3.999999 -12,3.999999 -4,0 -6,-3.999999 -11.999999,-3.999999 -6,0 -11.9999993,5.999999 -17.999999,13.999998 l 59.999997,10e-7"
+       id="path866"
+       sodipodi:nodetypes="csssc" />
+    <rect
+       style="fill:#155db7;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-dasharray:1.05833, 0.52916700000000005;stroke-opacity:1"
+       id="rect888"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="0" />
+    <rect
+       style="fill:#4692ea;fill-opacity:1;stroke:none;stroke-width:0.265;stroke-dasharray:1.06, 0.53000000000000003;stroke-opacity:1;stroke-miterlimit:4;stroke-dashoffset:0"
+       id="rect890"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="9.999999" />
+    <rect
+       style="fill:#a4c8f4;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-dasharray:1.05833, 0.52916700000000005;stroke-opacity:1"
+       id="rect888-4"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="19.999998" />
+    <rect
+       style="fill:#d1e3fa;fill-opacity:1;stroke:none;stroke-width:0.264999;stroke-miterlimit:4;stroke-dasharray:1.06, 0.53000000000000003;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect912"
+       width="5.9999995"
+       height="5.9999995"
+       x="-29.999998"
+       y="29.999998" />
+    <rect
+       style="fill:#4b8ad3;fill-opacity:0.0235294;stroke:url(#linearGradient1073);stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+       id="rect1065"
+       width="59"
+       height="59"
+       x="0.5"
+       y="0.5"
+       ry="0" />
+  </g>
+</svg>
diff --git a/kdotpy-v1.0.0/gitv.sh b/kdotpy-v1.0.0/gitv.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0951a6e147ae68478a38ad64f6e920a7bcf1e05f
--- /dev/null
+++ b/kdotpy-v1.0.0/gitv.sh
@@ -0,0 +1,7 @@
+#! /bin/bash
+# Write the current git version string (via gitversion.sh) to the file 'gitv'.
+# Does nothing (exit 0) when not run from a git working tree.
+# Exit codes: 1 = current directory not writable; 2 = existing 'gitv' file not
+# writable.
+if [ -d .git ]; then
+  # Bail out early if we cannot create or overwrite the target file.
+  if [ ! -w . ]; then exit 1; fi
+  if [[ -f gitv && ! -w gitv ]]; then exit 2; fi
+  bash gitversion.sh > gitv
+fi
+
diff --git a/kdotpy-v1.0.0/gitversion.sh b/kdotpy-v1.0.0/gitversion.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e2638e4abede6c85ae87f46f0f55abfb432799d8
--- /dev/null
+++ b/kdotpy-v1.0.0/gitversion.sh
@@ -0,0 +1,9 @@
+#! /bin/bash
+# Print the current git version to stdout: the output of 'git describe' with a
+# leading 'v' stripped. The same command is also defined in version.py and the
+# two must be kept identical: if the command is updated here, version.py must
+# be updated too.
+# Exit code 1 (with a message on stderr) when not run from a git working tree.
+if [ -d .git ]; then
+  git describe | sed -e 's/^v//g'
+else
+  echo "Not a git directory" >&2
+  exit 1
+fi
diff --git a/kdotpy-v1.0.0/pyproject.toml b/kdotpy-v1.0.0/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..bd67f54ccb48695a6dcc828fddd83d2c56ec59f5
--- /dev/null
+++ b/kdotpy-v1.0.0/pyproject.toml
@@ -0,0 +1,63 @@
+[build-system]
+requires = ["flit_core >=3.2,<4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "kdotpy"
+version = "1.0.0"
+description = "k·p theory on a lattice for simulating semiconductor band structures"
+authors = [
+    {name = "Wouter Beugeling"},
+    {name = "Florian Bayer"},
+    {name = "Christian Berger"},
+    {name = "Jan Böttcher"},
+    {name = "Leonid Bovkun"},
+    {name = "Christopher Fuchs"},
+    {name = "Maximilian Hofer"},
+    {name = "Saquib Shamim"},
+    {name = "Moritz Siebert"},
+    {name = "Li-Xian Wang"},
+    {name = "Ewelina M. Hankiewicz"},
+    {name = "Tobias Kießling"},
+    {name = "Hartmut Buhmann"},
+    {name = "Laurens W. Molenkamp"},
+    {email = "kdotpy@uni-wuerzburg.de"}
+]
+readme = "README.md"
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+    "Operating System :: POSIX",
+    "Operating System :: Microsoft :: Windows",
+    "Intended Audience :: Science/Research",
+    "Topic :: Scientific/Engineering :: Physics"
+]
+requires-python = ">=3.9"
+dependencies = [
+    "numpy>=1.20.0,<2",
+    "scipy>=1.6.0",
+    "matplotlib>=3.5.0"
+]
+
+[project.optional-dependencies]
+pandas = ["pandas>=1.0.0"]
+hdf5 = ["h5py>=2.8.0"]
+
+[project.scripts]
+kdotpy = "kdotpy:main"
+
+[project.urls]
+Homepage = "https://kdotpy.physik.uni-wuerzburg.de"
+Repository = "https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy"
+Documentation = "https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy/-/wikis/home"
+Article = "https://arxiv.org/abs/2407.12651"
+
+[tool.flit.sdist]
+include = ["src"]
+exclude = ["docs", "git*", ".git*", "Doxyfile"]
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+python_files = "kdotpy-test.py"
+
diff --git a/kdotpy-v1.0.0/requirements.txt b/kdotpy-v1.0.0/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fef567a92db2ae3e86b1df430e28c020a03eeec9
--- /dev/null
+++ b/kdotpy-v1.0.0/requirements.txt
@@ -0,0 +1,4 @@
+matplotlib==3.8.4
+numpy==1.26.4
+scipy==1.13.0
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/__init__.py b/kdotpy-v1.0.0/src/kdotpy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..42d42bf53ccca8a8e752c86c14e897fb2722a8ed
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/__init__.py
@@ -0,0 +1,45 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .main import main
+from .version import get_version
+
+__version__ = get_version()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/__main__.py b/kdotpy-v1.0.0/src/kdotpy/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..76455db4bb4e1bbd56f562ca74388d0e4ea6b1a8
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/__main__.py
@@ -0,0 +1,43 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+# Entry point for 'python -m kdotpy': delegate to the package's main() function.
+from . import main
+
+main()
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandalign/__init__.py b/kdotpy-v1.0.0/src/kdotpy/bandalign/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..283b9c3c90767c140d18fdc88965188616fa2b69
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandalign/__init__.py
@@ -0,0 +1,42 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .bandalign import bandindices
+from .adiabatic import bandindices_adiabatic, bandindices_adiabatic_ll
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandalign/adiabatic.py b/kdotpy-v1.0.0/src/kdotpy/bandalign/adiabatic.py
new file mode 100644
index 0000000000000000000000000000000000000000..41c241e9edfc2b7548c628c994704d482ccc1a0c
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandalign/adiabatic.py
@@ -0,0 +1,238 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from ..config import get_config_bool
+from ..cnp import estimate_charge_neutrality_point
+from ..diagonalization import DiagData
+from ..diagonalization import diagonalization as diag
+from ..diagonalization import lldiagonalization as lldiag
+from ..momentum import Vector, VectorGrid
+from ..parallel import parallel_apply
+
+from .bandalign import bandindices
+
+### ADIABATIC BAND INDEX INITIALIZATION
+def diag_hz_pot(alpha, params, pot = None, **modelopts):
+	"""Thin wrapper around diag.hz_k0
+
+	Diagonalize at zero momentum with the potential scaled by alpha, i.e., for
+	the Hamiltonian H0 + alpha V.
+
+	Arguments:
+	alpha        Number or Vector. Scale factor for the potential; for a Vector,
+	             its first component is used as the numerical value.
+	params       PhysParams instance. Passed to diag.hz_k0.
+	pot          Array. The potential V. Must not be None.
+	**modelopts  Further keyword arguments, passed through to diag.hz_k0.
+
+	Returns:
+	ddp   DiagDataPoint instance with ddp.paramval set to alpha (as a Vector).
+
+	Raises:
+	TypeError if pot is None.
+	"""
+	alpha_val = alpha.value[0] if isinstance(alpha, Vector) else alpha
+	if pot is None:
+		raise TypeError("Argument pot must be an array")
+	ddp = diag.hz_k0(
+		params, pot = alpha_val * pot, bandtype_warning_level = 0, **modelopts)
+	ddp.paramval = alpha if isinstance(alpha, Vector) else Vector(alpha)
+	return ddp
+
+def diag_hz_pot_ll(alpha, ll_mode, ll_max, h_sym, params, pot = None, **modelopts):
+	"""Thin wrapper around lldiag.hll_k0
+
+	Diagonalize the Landau-level Hamiltonian at zero momentum with the
+	potential scaled by alpha, i.e., for H0 + alpha V.
+
+	Arguments:
+	alpha        Number or Vector. Scale factor for the potential; for a Vector,
+	             its first component is used as the numerical value.
+	ll_mode      String. The Landau level mode; passed to lldiag.hll_k0.
+	ll_max       Integer. The highest Landau level index; passed to lldiag.hll_k0.
+	h_sym        SymbolicHamiltonian instance; passed to lldiag.hll_k0.
+	params       PhysParams instance; passed to lldiag.hll_k0.
+	pot          Array. The potential V. Must not be None.
+	**modelopts  Further options; note these are forwarded as the modelopts
+	             dict argument of lldiag.hll_k0, not expanded as keywords.
+
+	Returns:
+	ddp   DiagDataPoint instance with ddp.paramval set to alpha (as a Vector).
+
+	Raises:
+	TypeError if pot is None.
+	"""
+	alpha_val = alpha.value[0] if isinstance(alpha, Vector) else alpha
+	if pot is None:
+		raise TypeError("Argument pot must be an array")
+	ddp = lldiag.hll_k0(
+		ll_mode, ll_max, h_sym, params, pot = alpha_val * pot,
+		bandtype_warning_level = 0, modelopts = modelopts)
+	ddp.paramval = alpha if isinstance(alpha, Vector) else Vector(alpha)
+	return ddp
+
+def fmt_eival(x):
+	"""String formatting function for bandindices_adiabatic_debug().
+
+	Return an empty string if x is None, otherwise x formatted with three
+	decimals.
+	"""
+	return "" if x is None else ("%.3f" % x)
+
+def bandindices_adiabatic_debug(filename, diagdata, alphas, b_max=6):
+	"""Write bandindices_adiabatic(_ll) debug output to a file.
+
+	The output is CSV: one header row, then one row per alpha value. The first
+	column is alpha; the remaining columns hold the eigenvalues for band
+	indices -b_max, ..., -1, 1, ..., b_max (index 0 is skipped). Missing
+	eigenvalues appear as empty fields (see fmt_eival()).
+
+	Arguments:
+	filename   String. The filename of the output file
+	diagdata   DiagData instance. Generated by bandindices_adiabatic(_ll).
+	alphas     Numpy array. Values between 0 and 1.
+	b_max      Positive integer. Only include band indices from -b_max to b_max.
+	"""
+	# Band index 0 is excluded from the column list.
+	b_idx = [b for b in range(-b_max, b_max + 1) if b != 0]
+	with open(filename, "w") as f:
+		f.write("alpha," + ",".join("%i" % b for b in b_idx) + "\n")
+		for j, alpha in enumerate(alphas):
+			f.write("%g," % alpha)
+			f.write(",".join([fmt_eival(diagdata[j].get_eival((b,))) for b in b_idx]) + "\n")
+
+def bandindices_adiabatic_verbose(diagdata, alphas):
+	"""Verbose output for bandindices_adiabatic(_ll) to stdout
+
+	For each alpha value, print the energies and band characters of the states
+	with band indices -1 and +1, i.e., the states enclosing the charge
+	neutrality point (CNP).
+	"""
+	for j, alpha in enumerate(alphas):
+		# NOTE(review): get_eival() appears to return None for absent band
+		# indices (cf. fmt_eival); '%.2f' % None would raise TypeError here.
+		# Confirm that indices +/-1 are always present at this point.
+		e_m1 = diagdata[j].get_eival((-1,))
+		e_p1 = diagdata[j].get_eival((1,))
+		c_m1 = diagdata[j].get_char((-1,))
+		c_p1 = diagdata[j].get_char((1,))
+		print("alpha = %g: CNP between %s (%.2f meV) and %s (%.2f meV)" % (alpha, c_m1, e_m1, c_p1, e_p1))
+
+def bandindices_adiabatic(
+		params, steps = 10, pot = None, num_cpus = 1, modelopts = None,
+		bandalign_opts = None):
+	"""Do band alignment over slowly increasing potential, starting at zero potential
+
+	Since an electrostatic potential can affect the band characters, these may
+	no longer be suitable to estimate the charge neutrality point (CNP).
+	Instead, estimate the CNP for zero potential and then increase it 'slowly'
+	to full	strength, i.e., diagonalize the Hamiltonian H0 + alpha V, where H0
+	is the Hamiltonian without potential and V is the potential. The coefficient
+	alpha is increased from 0 to 1. Then do band alignment over the sequence of
+	DiagDataPoint instances. The result is the DiagDataPoint for alpha = 1 with
+	the	band indices set.
+
+	Arguments:
+	params          PhysParams instance.
+	steps           Integer. The number of intermediate steps. The step size for
+	                alpha is 1 / steps.
+	pot             Array or None. The potential.
+	num_cpus        Integer. Number of processes/threads for parallelization.
+	modelopts       A dict instance. The options for diagonalization.
+	bandalign_opts  A dict instance. The options for band alignment.
+
+	Returns:
+	ddp      DiagDataPoint for H = H0 + V, i.e., alpha = 1. The band indices
+	         (ddp.bindex) are set.
+	"""
+	if modelopts is None:
+		modelopts = {}
+	if bandalign_opts is None:
+		bandalign_opts = {}
+	modelopts1 = modelopts.copy()  # Make a copy so that we can change it safely
+	if 'obs' in modelopts1:
+		del modelopts1['obs']
+	if 'pot' in modelopts1:
+		del modelopts1['pot']
+
+	alphas = np.linspace(0, 1, steps+1)
+	grid = VectorGrid('x', alphas, astype = 'x', prefix = 'a')
+	diagdata = DiagData(parallel_apply(
+		diag_hz_pot, grid, (params, pot), f_kwds = modelopts1,
+		num_processes = num_cpus, propagate_interrupt = True,
+		description = "Calculating bands (k=0)"),
+		grid = grid)
+
+	# If bandalign_opts contains e0 (the location of the charge neutrality
+	# point), use it. Otherwise, get it from estimate_charge_neutrality_point().
+	if 'e0' in bandalign_opts:
+		ba_data = bandindices(diagdata, input_data=diagdata[0], params=params, **bandalign_opts)
+	else:
+		e0 = estimate_charge_neutrality_point(params, data=diagdata[0])
+		ba_data = bandindices(diagdata, e0=e0, input_data=diagdata[0], params=params, **bandalign_opts)
+
+	if get_config_bool('bandindices_adiabatic_debug'):  # debug output (to a file)
+		debug_file = "bandindices-adiabatic.csv"
+		bandindices_adiabatic_debug(debug_file, diagdata, alphas)
+	if 'verbose' in sys.argv:  # verbose output (to stdout)
+		bandindices_adiabatic_verbose(diagdata, alphas)
+
+	# The value of paramval should be reset to 0, as otherwise paramval = 1
+	# can be misinterpreted as B = 1.
+	diagdata[-1].paramval = Vector(0)
+	return diagdata[-1]
+
+def bandindices_adiabatic_ll(
+		ll_mode, ll_max, h_sym, params, steps=10, pot=None, num_cpus=1,
+		modelopts=None, bandalign_opts=None):
+	"""Do band alignment over slowly increasing potential, starting at zero potential, LL version
+
+	This function is for the most part identical to bandindices_adiabatic() and
+	should/could be	merged into it in the future.
+
+	Since an electrostatic potential can affect the band characters, these may
+	no longer be suitable to estimate the charge neutrality point (CNP).
+	Instead, estimate the CNP for zero potential and then increase it 'slowly'
+	to full	strength, i.e., diagonalize the Hamiltonian H0 + alpha V, where H0
+	is the Hamiltonian without potential and V is the potential. The coefficient
+	alpha is increased from 0 to 1. Then do band alignment over the sequence of
+	DiagDataPoint instances. The result is the DiagDataPoint for alpha = 1 with
+	the	band indices set.
+
+	Arguments:
+	ll_mode         String. The Landau level mode, 'legacy', 'sym', or 'full'.
+	ll_max          Integer. The highest Landau level index to consider.
+	h_sym           SymbolicHamiltonian instance.
+	params          PhysParams instance.
+	steps           Integer. The number of intermediate steps. The step size for
+	                alpha is 1 / steps.
+	pot             Array or None. The potential.
+	num_cpus        Integer. Number of processes/threads for parallelization.
+	modelopts       A dict instance. The options for diagonalization.
+	bandalign_opts  A dict instance. The options for band alignment.
+
+	Returns:
+	ddp      DiagDataPoint for H = H0 + V, i.e., alpha = 1. The band indices
+	         (ddp.bindex) are set.
+	"""
+	if modelopts is None:
+		modelopts = {}
+	if bandalign_opts is None:
+		bandalign_opts = {}
+	modelopts1 = modelopts.copy()  # Make a copy so that we can change it safely
+	if 'obs' in modelopts1:
+		del modelopts1['obs']
+	if 'pot' in modelopts1:
+		del modelopts1['pot']
+
+	alphas = np.linspace(0, 1, steps + 1)
+	grid = VectorGrid('x', alphas, astype='x', prefix='a')
+	diagdata = DiagData(parallel_apply(
+		diag_hz_pot_ll, grid, (ll_mode, ll_max, h_sym, params, pot), f_kwds=modelopts1,  # mind different diag-function
+		num_processes=num_cpus, propagate_interrupt=True,
+		description="Calculating bands (k=0)"),
+		grid=grid)
+
+	# If bandalign_opts contains e0 (the location of the charge neutrality
+	# point), use it. Otherwise, get it from estimate_charge_neutrality_point().
+	if 'e0' in bandalign_opts:
+		ba_data = bandindices(diagdata, input_data=diagdata[0], params=params, **bandalign_opts)
+	else:
+		e0 = estimate_charge_neutrality_point(params, data=diagdata[0])
+		ba_data = bandindices(diagdata, e0=e0, input_data=diagdata[0], params=params, **bandalign_opts)
+
+	if get_config_bool('bandindices_adiabatic_debug'):  # debug output (to a file)
+		debug_file = "bandindices-adiabatic.csv"
+		bandindices_adiabatic_debug(debug_file, diagdata, alphas)
+	if 'verbose' in sys.argv:  # verbose output (to stdout)
+		bandindices_adiabatic_verbose(diagdata, alphas)
+
+	# For b-dependence, paramval should be reset to 0, as otherwise data cannot
+	# be interpreted as B = 0.
+	diagdata[-1].paramval = Vector(0)
+	return diagdata[-1]
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandalign/bandalign.py b/kdotpy-v1.0.0/src/kdotpy/bandalign/bandalign.py
new file mode 100644
index 0000000000000000000000000000000000000000..74890d3ea0d31ed5c0699cb254e6b3c74bc6d5ec
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandalign/bandalign.py
@@ -0,0 +1,310 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from ..cnp import estimate_charge_neutrality_point
+from ..types import DiagDataPoint
+
+from .base import BandAlignData, EnergyOutOfRangeError
+from .base import bandalign, bandalign2d, bandalign_bulk
+from .base import diagdatapoint_to_bandalignpoint, eival_e0_to_bandaligndata
+from .csvi import bandindices_from_file
+
+
+def bandindices_ddp_input(ddp, gridvar=None, e0=None, e0_relax=False):
+	"""Extract a BandAlignData with a single BandAlignPoint from a DiagDataPoint instance
+
+	Arguments:
+	ddp       DiagDataPoint instance.
+	gridvar   String or None. If 'k' or '', use ddp.k as the position of the
+	          point; otherwise use ddp.paramval.
+	e0        Float or None. Energy to 'pin' the band indices to; only used if
+	          ddp does not already have band indices set.
+	e0_relax  True or False. Passed on to eival_e0_to_bandaligndata().
+
+	Returns:
+	ba_data   BandAlignData instance containing a single BandAlignPoint, or
+	          None if no point could be extracted.
+	"""
+	if ddp.bindex is not None:
+		# Band indices are already set; reuse them directly.
+		p0 = diagdatapoint_to_bandalignpoint(ddp, gridvar=gridvar)
+		ba_data = None if p0 is None else BandAlignData([p0])
+	else:  # Handles both cases, e0 = value and e0 = None
+		x0 = ddp.k if gridvar in ['k', ''] else ddp.paramval
+		ba_data = eival_e0_to_bandaligndata(ddp.eival, e0, x0=x0, e0_relax=e0_relax)
+		# Catch EnergyOutOfRangeError in bandindices_worker().
+	return ba_data
+
+
+### BAND INDICES INTERFACE ###
def bandindices_worker(
	data, input_data = None, e0 = None, k0 = None, params = None, g0 = None,
	component = None, e0_relax = False, auto_cnp = True):
	"""Get band indices (worker function)
	This function prepares data for band alignment, and performs the appropriate
	extrapolations in order to try and fill in the band indices for all points
	in the input data.

	Arguments:
	data         DiagData instance. Result from diagonalization functions, which
	             may be for a multi-dimensional grid.
	input_data   BandAlignData instance, DiagDataPoint instance or None. If set,
	             start with the band indices already defined there. If None,
	             infer band indices from e0.
	e0           Float or None. Energy where to 'pin' the band indices to. By
	             default (for g0 = 0), this is the energy neutral gap, between
	             bands -1 and 1. If both e0 and input_data are not None, then e0
	             is prioritized.
	k0           Vector instance or None. Where to apply the energy e0 and start
	             the band alignment. If None, use the zero point of data.
	params       PhysParams instance.
	g0           Integer. Gap index at energy e0. The band indices below and
	             above the gap are set to -1 and 1 if g0 == 0, g0 - 1 and g0 if
	             g0 < 0, and g0 and g0 + 1 if g0 > 0. Only affects the result
	             when input_data is None and e0 is given.
	component    String or None. Vector component that is used as 'variable'.
	             For example, 'kx'.
	e0_relax     True or False. If False (default), require that both the
	             energies above and below the gap must be defined. If True,
	             allow one to be undefined. Applies only to 'pinning' to e0.
	auto_cnp     True or False. If True, try to determine the CNP if e0 has not
	             been specified. If False, do not attempt to calculate the CNP.

	Returns:
	ba_data   BandAlignData instance. Contains the band indices for as many data
	          points as possible. On failure, return None.
	"""
	if len(data) == 0:
		sys.stderr.write("Warning (bandindices_worker): No data.\n")
		return None

	# Bulk mode, except bulk LL mode. If the data is from bulk LL mode, e0 is
	# given explicitly, so we use e0 as a way to detect this mode.
	if params is not None and params.nz == 1 and e0 is None:
		return bandalign_bulk(data, params=params)

	# Determine the anchor point (data_k0) for the alignment from k0 and/or
	# input_data; k0 may be an index, a numeric value, or absent.
	if k0 is None and isinstance(input_data, DiagDataPoint):
		data_k0, k0 = data.find(input_data.k, input_data.paramval, return_index=True)
		if data_k0 is None:
			sys.stderr.write("ERROR (bandindices_worker): Argument input_data is a DiagDataPoint outside the DiagData instance.\n")
			input_data = None
	elif k0 is None:
		data_k0, k0 = data.get_zero_point(return_index = True)
	elif isinstance(k0, (float, np.floating, tuple)):
		data_k0, k0 = data.find(k0, return_index = True)
	elif isinstance(k0, (int, np.integer)):
		if k0 < 0 or k0 >= len(data):
			raise IndexError("Invalid k index.")
		data_k0 = data[k0]
	else:
		raise TypeError("Invalid type for k0.")
	if data_k0 is None:
		sys.stderr.write("Warning (bandindices_worker): Anchor point for alignment could not be determined. Using alternative base point.\n")
		data_k0, k0 = data.get_base_point(return_index = True)
		auto_cnp = False

	## Determine whether data has a single LL index
	llindices = data.get_all_llindex()
	llindex = None
	if llindices is None or len(llindices) == 0:
		pass
	elif len(llindices) == 1:
		llindex = llindices[0]
	else:
		sys.stderr.write("Warning (bandindices_worker): Data point at zero is a mixture of different LL indices.\n")
	ef_gap_message = (llindex is None) or (llindex == 1)

	## If e0 is not defined, try to get it from estimate_charge_neutrality_point()
	if e0 is None and params is not None and auto_cnp:
		cnp_data = input_data if isinstance(input_data, DiagDataPoint) else data_k0
		e0 = estimate_charge_neutrality_point(params, data=cnp_data, print_gap_message=ef_gap_message)
		g0 = 0  # The result from estimate_charge_neutrality_point() always refers to the CNP

	## Prepare: From input_data, define the initial BandAlignData (ba_data)
	if input_data is not None and not isinstance(input_data, (BandAlignData, DiagDataPoint)):
		raise TypeError("Argument input_data must be a BandAlignData instance, DiagDataPoint instance, or None")
	if isinstance(input_data, DiagDataPoint) and e0 is None:
		try:
			ba_data = bandindices_ddp_input(input_data, gridvar=data.gridvar, e0_relax=e0_relax)
		except EnergyOutOfRangeError:
			sys.stderr.write("ERROR (bandindices_worker): Band alignment failed, because zero energy is out of range.\n")
			ba_data = None
	elif isinstance(input_data, BandAlignData):  # BandAlignData: use input itself
		ba_data = input_data
	else:
		x0 = data_k0.k if data.gridvar in ['k', ''] else data_k0.paramval
		try:
			# Note: e0 may be a numerical value or None
			ba_data = eival_e0_to_bandaligndata(data_k0.eival, e0, g0=g0, x0=x0, e0_relax=e0_relax)
		except EnergyOutOfRangeError:
			sys.stderr.write("ERROR (bandindices_worker): Band alignment failed, because zero energy is out of range.\n")
			return None

	if ba_data is None:
		sys.stderr.write("Warning (bandindices_worker): Failed.\n")
		return None

	## For low LLs (llindex = -2, -1, 0), redefine band indices based on e0
	## Otherwise the band indices may misalign because of non-matching eigenvalues
	if llindex in [-2, -1, 0] and len(ba_data) == 1:
		x0 = data_k0.k if data.gridvar in ['k', ''] else data_k0.paramval
		p0 = ba_data.get(x0)
		if p0 is not None:
			try:
				p1 = p0.match_gap(data_k0.eival)
			except ValueError:
				# With a single precision solver, the default accuracy can be
				# too strict and no match occurs. Retry with lower accuracy.
				p1 = p0.match_gap(data_k0.eival, accuracy=1e-3)
			ba_data = BandAlignData([p1])
		else:
			# Bug fix: the original message read "Cannot not match" and had an
			# unfilled %i placeholder (the % operator was never applied).
			sys.stderr.write("Warning (bandindices_worker): Cannot match energy eigenvalues for LL %i. Beware that densities may be incorrect.\n" % llindex)

	## Align bands and set data
	if not isinstance(data.shape, tuple):
		raise TypeError("Attribute data.shape must be a tuple")
	dim = len(data.shape)
	if dim == 1:  # 1D grid (linear)
		if component is None and data.grid is not None:
			component = data.grid.var[0]

		ba_data = bandalign(data, ba_data=ba_data, component=component)
		return ba_data
	elif dim == 2:  # 2D grid; common code for polar and cartesian arrangements
		components = [None, None] if data.grid is None else data.grid.var

		ba_data = bandalign2d(data, ba_data=ba_data, components=components)

		return ba_data
	elif dim == 3:  # 3D grid; we shouldn't end up here in bulk mode
		raise RuntimeError("For 3-dim data, the dedicated bulk mode must be used")
	else:
		raise ValueError("Data must be of 1, 2, or 3 dimensions")
+
+def bandindices_retry(data, params = None, e0 = None, **kwds):
+	"""Retry band index calculation.
+
+	If the band index calculation fails, retry at slightly different energies.
+
+	Arguments:
+	data         DiagData instance. Result from diagonalization functions, which
+	             may be for a multi-dimensional grid.
+	params       PhysParams instance.
+	e0           Float or None. Energy where to 'pin' the band indices to. By
+	             default (for g0 = 0), this is the energy neutral gap, between
+	             bands -1 and 1. If both e0 and input_data are not None, then e0
+	             is prioritized.
+	**kwds       Keyword arguments passed to bandindices_worker(), i.e., band
+	             alignment options.
+
+	Returns:
+	b_idx     BandAlignData instance. Contains the band indices for as many data
+	          points as possible. On failure, return None. (The energy at which
+	          the alignment succeeded is reported via a warning message; it is
+	          not returned.)
+	"""
+	if e0 is None:
+		# Without a target energy there is nothing to vary; single attempt.
+		b_idx = bandindices_worker(data, params = params, e0 = None, **kwds)
+		return b_idx
+	n = 0
+	# Try e0 first, then probe nearby energies in increasing distance.
+	for e1 in e0 + np.array([0.0, 1.0, -1.0, 0.5, -0.5, 2.0, -2.0]):
+		b_idx = bandindices_worker(data, params = params, e0 = e1, **kwds)
+		n += 1
+		if b_idx is not None:
+			if n != 1:
+				sys.stderr.write("Warning (bandindices_retry): Band indices were found successfully at target energy %s.\n" % e1)
+			return b_idx
+	sys.stderr.write("Warning (bandindices_retry): Band indices were not found after %i attempts.\n" % n)
+	return None
+
+def bandindices(data, e0 = None, g0 = None, from_file = None, retry = False, do_apply = True, **kwds):
+	"""Do band alignment. Wrapper function for several other functions defined in bandalign.py.
+
+	Arguments:
+	data         DiagData instance.
+	e0           Float or None. Energy where to 'pin' the band indices to. By
+	             default (for g0 = 0), this is the energy neutral gap, between
+	             bands -1 and 1. This value takes precedence above input_data,
+	             if that is also defined in **kwds.
+	g0           Integer. Gap index at energy e0. The band indices below and
+	             above the gap are set to -1 and 1 if g0 == 0, g0 - 1 and g0 if
+	             g0 < 0, and g0 and g0 + 1 if g0 > 0.
+	from_file    String or None. Filename for extracting band index information.
+	             If None, do not use a file.
+	retry        True or False. If True, try to do the alignment for slightly
+	             different energies. This used to be the default for
+	             kdotpy-merge.py, but is probably not needed any more.
+	do_apply     True or False. If True, fill in the band indices into the
+	             DiagData instance data. If False, only return the result.
+	**kwds       Keyword arguments passed on to bandindices_worker().
+
+	Returns:
+	ba_data      BandAlignData instance, dict of BandAlignData instances, or
+	             None. The value None indicates failure of band alignment. For
+	             LL mode, return a dict, otherwise a single BandAlignData
+	             instance.
+	"""
+	# Check whether the data has LL indices
+	ll_idx = data.get_all_llindex()
+	ll = ll_idx is not None
+
+	if from_file is not None:
+		# File input provides an initial BandAlignData; the worker then extends
+		# it to the remaining data points.
+		ba_data = bandindices_from_file(from_file)
+		if ll:  # split by LL index
+			for lln in ba_data:
+				data_lln = data.select_llindex(lln)
+				ba_data[lln] = bandindices_worker(data_lln, e0 = None, e0_relax = (lln < 1), input_data = ba_data[lln])
+		else:
+			ba_data = bandindices_worker(data, e0 = None, input_data = ba_data)
+	else:
+		bandindices_fn = bandindices_retry if retry else bandindices_worker
+		if ll:  # split by LL index
+			ba_data = {}
+			for lln in ll_idx:
+				data_lln = data.select_llindex(lln)
+				ba_data[lln] = bandindices_fn(data_lln, e0 = e0, g0 = g0, e0_relax = (lln < 1), **kwds)
+		else:
+			ba_data = bandindices_fn(data, e0 = e0, g0 = g0, **kwds)
+
+	if do_apply:
+		# Write the band indices back into the DiagData instance.
+		data.reset_bindex()
+		if ll and isinstance(ba_data, dict):
+			for lln in ba_data:
+				if isinstance(ba_data[lln], BandAlignData):
+					ba_data[lln].apply_to(data, llindex = lln)
+				elif ba_data[lln] is not None:
+					raise TypeError("Value should be a BandAlignData instance or None")
+					# Possible fallback: data.set_bindex(ba_data[lln], llindex = lln)
+		elif isinstance(ba_data, BandAlignData):
+			ba_data.apply_to(data)
+		elif ba_data is not None:
+			raise TypeError("Value should be a BandAlignData instance or None")
+			# Possible fallback: data.set_bindex(ba_data)
+
+	return ba_data
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandalign/base.py b/kdotpy-v1.0.0/src/kdotpy/bandalign/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..92458099a3d5c651e497bd9de36c18c4329c49a2
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandalign/base.py
@@ -0,0 +1,845 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from ..config import get_config, get_config_num
+from ..types import DiagDataPoint, DiagData
+from ..momentum import Vector
+
+### HELPER FUNCTIONS ###
+# helper functions for bandalign()
def max_below(arr, x):
	"""Get maximum value in array that is < x.

	Returns None if no element of arr is strictly smaller than x. The boundary
	test uses <=: for x == arr.min() there is no element < x, and the original
	strict comparison (x < arr.min()) would have called np.amax on an empty
	selection, raising ValueError.
	"""
	return None if x <= arr.min() else np.amax(arr[arr < x])
+
def min_above(arr, x):
	"""Get minimum value in array that is > x.

	Returns None if no element of arr is strictly larger than x. The boundary
	test uses >=: for x == arr.max() there is no element > x, and the original
	strict comparison (x > arr.max()) would have called np.amin on an empty
	selection, raising ValueError.
	"""
	return None if x >= arr.max() else np.amin(arr[arr > x])
+
+def linear_predict_array(arr1, arr2, x1, x2, x, method):
+	"""Linearly extrapolate two arrays to a third.
+
+	Arguments:
+	arr1, arr2   Two arrays of the same size. Input value.
+	x1, x2       Two floats. Arrays arr1 and arr2 should be viewed as a function
+	             A evaluated at x1 and x2.
+	x            Float. Target value. If arr1 = A(x1) and arr2 = A(x2),
+	             calculate A(x).
+	method       'l', 'lin', 'linear'; or 's', 'sqrt'. If 'linear' (etc.), do a
+	             linear interpolation or extrapolation. If 'sqrt' (etc.),
+	             linearly interpolate or extrapolate w.r.t. sqrt(|x|). The
+	             latter is useful for quadratically spaced magnetic field
+	             values, for example. If none of this, return
+	             arr2 + (arr2 - arr1).
+
+	Returns:
+	Array of the same size as arr1 and arr2.
+	"""
+	if method in ['l', 'lin', 'linear']:
+		# Guard against division by zero when the two x values coincide.
+		return arr2 if x1 == x2 else arr2 + ((x - x2) / (x2 - x1)) * (arr2 - arr1)
+	elif method in ['s', 'sqrt']:
+		# Same guard in terms of |x|, since interpolation is in sqrt(|x|).
+		return arr2 if abs(x1) == abs(x2) else arr2 + ((np.sqrt(abs(x)) - np.sqrt(abs(x2))) / (np.sqrt(abs(x2)) - np.sqrt(abs(x1)))) * (arr2 - arr1)
+	else:
+		# Fallback: fixed-step extrapolation, independent of x, x1, x2.
+		return arr2 + (arr2 - arr1)
+
+def bandalign_test(above, below, l, where):
+	"""Test band align result.
+
+	Arguments:
+	above  Integer. Band index of the band above the gap.
+	below  Integer. Band index of the band below the gap.
+	l      Integer. Presumably the number of eigenvalues/bands at this point;
+	       'below' may be at most l - 1. TODO confirm (function is unused).
+	where  Object with valid str() method. The position to show in the warning
+	       message.
+
+	Returns:
+	True or False
+	"""
+	# TODO: UNUSED
+	if above <= below:
+		sys.stderr.write("Warning (bandalign): Calculation of band indices failed at (k, B) = %s. Invalid relative positions of 'above' and 'below'.\n" % where)
+		return False
+	if above < 0:
+		sys.stderr.write("Warning (bandalign): Calculation of band indices failed at (k, B) = %s. Zero gap too low. Possible fix: Increase momentum/magnetic field resolution, increase neig or decrease targetenergy.\n" % where)
+		return False
+	if below > l - 1:
+		sys.stderr.write("Warning (bandalign): Calculation of band indices failed at (k, B) = %s. Zero gap too high. Possible fix: Increase momentum/magnetic field resolution, increase neig or increase targetenergy.\n" % where)
+		return False
+	return True
+
def get_bandalign_config():
	"""Get configuration values for band alignment.

	Reads the configuration options 'band_align_exp' and
	'band_align_ndelta_weight'. On an invalid 'band_align_exp', an error is
	printed and the key is omitted from the result.

	Returns:
	ba_kwds   A dict instance. Keys are 'align_exp' and 'ndelta_weight'. These
	          serve as keywords arguments that are passed to align_energies().
	"""
	config_dict = {}
	align_exp = get_config('band_align_exp')
	if align_exp.lower() == 'max':
		config_dict['align_exp'] = 'max'
	else:
		try:
			align_exp = int(align_exp)
		except (ValueError, TypeError):
			# Catch only conversion failures; the original bare 'except' also
			# swallowed KeyboardInterrupt and SystemExit.
			align_exp = None
		if align_exp is None or align_exp < 0:
			# Bug fix: the message previously read '=> 0' instead of '>= 0'.
			sys.stderr.write("ERROR (get_bandalign_config): Option 'band_align_exp' must be 'max' or an integer value >= 0.\n")
		else:
			config_dict['align_exp'] = align_exp
	ndelta_weight = get_config_num('band_align_ndelta_weight', minval = 0.0)
	config_dict['ndelta_weight'] = ndelta_weight
	return config_dict
+
+### CLASS DEFINITIONS ###
+
+class EnergyOutOfRangeError(ValueError):
+	"""Raised when a zero/pinning energy lies outside the range of the eigenvalues."""
+	pass
+
+class BandAlignPoint:
+	"""Container class for eigenvalues plus a minimum and maximum band index
+
+	Attributes:
+	k        Vector instance. Momentum value. (The input may be numeric, but
+	         this is converted to Vector by __init__().)
+	eival    Array of eigenvalues.
+	n        Integer. Number of eigenvalues.
+	bmin     Nonzero integer. Minimum band index (index for the smallest
+	         eigenvalue).
+	bmax     Nonzero integer. Maximum band index (index for the largest
+	         eigenvalue). (The input may be None. In that case, the value is set
+	         automatically based on bmin and n.)
+	aligned_with_e0  True or False. Whether the band indices were aligned with
+	                 the zero energy. This should be set to True if the band
+	                 indices were set directly from e0, or if the band indices
+	                 are obtained from another BandAlignPoint or a DiagDataPoint
+	                 with aligned_with_e0 set to True.
+	"""
+	def __init__(self, k, eival, bmin, bmax = None, aligned_with_e0 = False):
+		if isinstance(k, Vector):
+			self.k = k
+		elif isinstance(k, (int, float, np.integer, np.floating)):
+			self.k = Vector(k)
+		else:
+			raise TypeError("Argument k must be either numeric or a Vector instance")
+		if isinstance(eival, list) or isinstance(eival, np.ndarray):
+			self.eival = np.array(eival)
+		else:
+			raise TypeError("Argument eival must be a one-dimensional list or array")
+		if not (isinstance(self.eival, np.ndarray) and self.eival.ndim == 1):
+			raise TypeError("Argument eival must be a one-dimensional list or array")
+		self.n = len(self.eival)
+		if isinstance(bmin, (int, np.integer)) and bmin != 0:
+			self.bmin = bmin
+		else:
+			raise TypeError("Argument bmin must be a nonzero integer")
+		if bmax is None:
+			# Band index 0 does not exist; if the index range crosses zero,
+			# bmax must be shifted up by one to cover n indices.
+			self.bmax = self.bmin + self.n - 1
+			if self.bmin < 0 and self.bmax >= 0:
+				self.bmax += 1
+		elif isinstance(bmax, (int, np.integer)) and bmax != 0:
+			if bmax < self.bmin:
+				raise ValueError("bmax cannot be smaller than bmin")
+			nb = bmax - bmin + (0 if bmin < 0 and bmax > 0 else 1)
+			if nb != self.n:
+				raise ValueError("Values of bmin and bmax do not match length of eival")
+			self.bmax = bmax
+		else:
+			raise TypeError("Argument bmax must be a nonzero integer or None")
+		self.aligned_with_e0 = aligned_with_e0
+
+	def __str__(self):
+		return "<BandAlignPoint at %s, %i to %i (#=%i)>" % (self.k, self.bmin, self.bmax, self.n)
+
+	def bindex(self):
+		"""Get sorted array of band indices"""
+		bidx = np.arange(self.bmin, self.bmax + 1, 1, dtype = int)
+		bidx = bidx[bidx != 0]
+		bidx_ordered = np.zeros_like(bidx)
+		bidx_ordered[np.argsort(self.eival)] = bidx  # do 'inverse' argsort
+		return bidx_ordered
+
+	def get_zero_energy(self):
+		"""Get 'zero energy', i.e., the energy between bands -1 and 1.
+
+		Returns a float, or None if neither band -1 nor band 1 is contained in
+		this point. The small 1e-6 offsets put the energy strictly outside the
+		eigenvalue range when all bands lie on one side of the gap.
+		"""
+		if self.bmax == -1:
+			return np.amax(self.eival) + 1e-6
+		elif self.bmin == 1:
+			return np.amin(self.eival) - 1e-6
+		elif self.bmin <= -1 and self.bmax >= 1:
+			sorted_eival = np.sort(self.eival)
+			return (sorted_eival[-self.bmin - 1] + sorted_eival[-self.bmin]) / 2.0
+		else:
+			return None
+
+	def set_zero_energy(self, e0, g0 = 0, relax = False):
+		"""Set zero energy.
+
+		Arguments:
+		e0     Float. Energy where to 'pin' the band indices to. By default (for
+		       g0 = 0), this is the energy neutral gap, between bands -1 and 1.
+		g0     Integer. Gap index at energy e0. The band indices below and above
+		       the gap are set to -1 and 1 if g0 == 0, g0 - 1 and g0 if g0 < 0,
+		       and g0 and g0 + 1 if g0 > 0.
+		relax  True or False. If False (default), require that both the energies
+		       above and below the gap must be defined. If True, allow one to be
+		       undefined.
+		"""
+		e_above = min_above(self.eival, e0)
+		e_below = max_below(self.eival, e0)
+		if e_above is not None and e_below is not None:
+			n_below = np.count_nonzero(self.eival <= e_below)
+		elif relax and e_above is None:
+			n_below = np.count_nonzero(self.eival <= e_below)
+		elif relax and e_below is None:
+			n_below = np.count_nonzero(self.eival < e_above)
+		else:
+			raise EnergyOutOfRangeError("Zero energy out of eigenvalue range")
+		# Band index 0 does not exist; a nonnegative bmin is shifted up by one.
+		self.bmin = g0 - n_below
+		if self.bmin >= 0:
+			self.bmin += 1
+		self.bmax = self.bmin + self.n - 1
+		if self.bmin < 0 and self.bmax >= 0:
+			self.bmax += 1
+		self.aligned_with_e0 = True
+
+	def match_gap(self, eival, accuracy = 1e-6, in_place = False):
+		"""Match and select a (sub)set of eigenvalues of the current BandAlignPoint.
+
+		Arguments:
+		eival      Numpy array. Energies that should be selected.
+		accuracy   Float. The maximum difference for comparing the values in
+		           eival to those of the present instance.
+		in_place   True or False. If True, restrict the present instance to the
+		           given subset. If False, return a new BandAlignPoint instance.
+
+		Returns:
+		A new BandAlignPoint instance if in_place is False; None otherwise (the
+		present instance is then modified in place).
+		"""
+		sel = np.amin(np.abs(self.eival[:, np.newaxis] - eival[np.newaxis, :]), axis = 1) <= accuracy
+		if np.count_nonzero(sel) == 0:
+			raise ValueError("No matching eigenvalues")
+		bindex_restricted = self.bindex()[sel]
+		# Recompute bmin/bmax from the selected indices; band index 0 is skipped.
+		n_below = np.count_nonzero(bindex_restricted < 0)
+		n_above = np.count_nonzero(bindex_restricted > 0)
+		new_bmin = 1 if n_below == 0 else -n_below
+		new_bmax = -1 if n_above == 0 else n_above
+		if in_place:
+			self.eival = self.eival[sel]
+			self.n = len(self.eival)
+			self.bmin = new_bmin
+			self.bmax = new_bmax
+		else:
+			return BandAlignPoint(
+				self.k, self.eival[sel], bmin = new_bmin, bmax = new_bmax,
+				aligned_with_e0 = self.aligned_with_e0
+			)
+
+	def align_eival(self, eival, do_sort = True, **ba_kwds):
+		"""Align a set of eigenvalues to the current BandAlignPoint instance.
+
+		Arguments:
+		eival      Numpy array. Energies that should be aligned, i.e., to which
+		           band indices should be assigned.
+		do_sort    True or False. Whether the resulting BandAlignPoint instance
+		           should contain a sorted array of eigenvalues (True) or the
+		           eigenvalues as given by eival (False).
+		**ba_kwds  Keyword arguments passed to align_energies().
+
+		Return:
+		A BandAlignPoint instance with the eigenvalues eival.
+		"""
+		if len(eival) == 0:
+			# The data point has no eivals at all. There is nothing to align.
+			# We just keep the current extrapolation at this point, which
+			# effectively extends the interpolations to the next valid data point.
+			# Keep in mind that setting band indices for a DiagDataPoint requires
+			# the same eival length and must be forced to skip in case neig == 0.
+			return self
+		eival_sort = np.sort(eival)
+		# delta_i is the index offset determined by align_energies(); shift
+		# bmin accordingly, again skipping the nonexistent band index 0.
+		delta_i, _, _, _ = align_energies(np.sort(self.eival), eival_sort, **ba_kwds)
+		new_bmin = self.bmin - delta_i
+		if self.bmin < 0 and new_bmin >= 0:
+			new_bmin += 1
+		elif self.bmin > 0 and new_bmin <= 0:
+			new_bmin -= 1
+		return BandAlignPoint(
+			self.k, eival_sort if do_sort else eival, new_bmin,
+			aligned_with_e0=self.aligned_with_e0
+		)
+
+	def interpolate(self, other, k_new, component = None, gridvar = None):
+		"""Interpolate (or extrapolate) two BandAlignPoint instances to a third one.
+		Interpolate the two sets of energies and define band indices at a 'new'
+		momentum or magnetic-field value.
+
+		Arguments:
+		self       The present BandAlignPoint instance. 'First value'.
+		other      BandAlignPoint instance. 'Second value'.
+		k_new      Vector instance. Target momentum or magnetic-field value.
+		component  String or None. Vector component that is used as
+		           'interpolation variable'. For example, 'kx'.
+		gridvar    String or None. The grid variable, either 'k', 'b', or 'a'.
+		           This is the prefix for the vector component (see information
+		           in momentum.py)
+
+		Returns:
+		BandAlignPoint instance at k_new, or None if the two points have no
+		band indices in common.
+		"""
+		if not isinstance(other, BandAlignPoint):
+			raise TypeError("Argument 'other' must be a BandAlignPoint instance")
+		if not isinstance(k_new, Vector):
+			raise TypeError("Argument 'k_new' must be a Vector instance")
+		prefix = gridvar if gridvar is not None and component is not None and component.startswith(gridvar) else ''
+		x1 = self.k.component(component, prefix = prefix)
+		x2 = other.k.component(component, prefix = prefix)
+		x_new = k_new.component(component, prefix = prefix)
+		# Choose 's'quare interpolation for magnetic field 'b' or 'l'inear
+		# interpolation otherwise.
+		method = 's' if gridvar == 'b' else 'l'
+
+		# Restrict both points to their common band-index range before
+		# interpolating the (sorted) eigenvalues entrywise.
+		b1 = self.bindex()
+		b2 = other.bindex()
+		bmin_new = max(self.bmin, other.bmin)
+		bmax_new = min(self.bmax, other.bmax)
+		if bmax_new < bmin_new:
+			return None
+		ei1 = self.eival[(b1 >= bmin_new) & (b1 <= bmax_new)]
+		ei2 = other.eival[(b2 >= bmin_new) & (b2 <= bmax_new)]
+		ei_new = linear_predict_array(np.sort(ei1), np.sort(ei2), x1, x2, x_new, method = method)
+		aligned_with_e0 = self.aligned_with_e0 and other.aligned_with_e0
+		return BandAlignPoint(
+			k_new, np.sort(ei_new), bmin_new, bmax_new,	aligned_with_e0=aligned_with_e0
+		)
+
+def diagdatapoint_to_bandalignpoint(ddp, gridvar = None, llindex = None):
+	"""Extract BandAlignPoint from DiagDataPoint.
+
+	Arguments:
+	ddp      DiagDataPoint instance.
+	gridvar  String or None. If set, 'k', 'b', or 'a', which indicates the
+	         nature of the grid variable. This is also the prefix of the Vector
+	         instance ddp.k or ddp.paramval.
+	llindex  Integer or None. If set, select the states with that LL index only.
+
+	Returns:
+	A BandAlignPoint instance if ddp.bindex is set. None otherwise.
+
+	Development note:
+	This is not defined as a @classmethod, because it may return None under some
+	conditions, which is not a BandAlignPoint instance.
+	"""
+	if not isinstance(ddp, DiagDataPoint):
+		raise TypeError("Argument 'ddp' must be a DiagDataPoint instance")
+	if ddp.bindex is None:
+		return None
+	k = ddp.k if gridvar in ['k', ''] else ddp.paramval
+	if ddp.llindex is not None:
+		# LL mode: restrict to the states with the requested LL index.
+		if llindex is None:
+			raise ValueError("For DiagDataPoint with llindex set, the argument 'llindex' must not be None.")
+		sel = (ddp.llindex == llindex)
+		if np.count_nonzero(sel) == 0:
+			return None
+		eival = np.sort(ddp.eival[sel])
+		bmin, bmax = min(ddp.bindex[sel]), max(ddp.bindex[sel])
+	elif llindex is None:
+		eival = np.sort(ddp.eival)
+		bmin, bmax = min(ddp.bindex), max(ddp.bindex)
+	else:
+		# llindex requested but ddp carries no LL indices: nothing to select.
+		return None
+	return BandAlignPoint(k, eival, bmin, bmax, aligned_with_e0=ddp.aligned_with_e0)
+
+class BandAlignData:
+	"""Container class for multiple BandAlignPoint instances
+
+	Attributes:
+	bapoints    List of BandAlignPoint instances.
+
+	Arguments (__init__):
+	data     DiagData instance from which the BandAlignPoints are initialized.
+	llindex  Integer or None. If set, select the states with that LL index only.
+	"""
+	def __init__(self, data, llindex = None):
+		if isinstance(data, DiagData):
+			# Build one BandAlignPoint per DiagDataPoint that has band indices.
+			bapoints = []
+			for ddp in data:
+				p = diagdatapoint_to_bandalignpoint(ddp, gridvar = data.gridvar, llindex = llindex)
+				if p is not None:
+					bapoints.append(p)
+		elif isinstance(data, list) and all(isinstance(p, BandAlignPoint) for p in data):
+			bapoints = data
+		else:
+			raise TypeError("Argument 'data' must be a DiagData instance or a list of BandAlignPoint instances")
+		self.bapoints = bapoints
+
+	def get(self, xval):
+		"""Get point at xval
+
+		Arguments:
+		xval  Vector instance or float/integer.
+
+		Returns:
+		The BandAlignPoint instance at xval if it exists in self.bapoints. None
+		otherwise.
+		"""
+		# Normalize numeric input to a Vector, then do a linear search.
+		k = Vector(xval) if isinstance(xval, (int, float, np.integer, np.floating)) else xval
+		for p in self.bapoints:
+			if p.k == k:
+				return p
+		return None
+
+	def append(self, bapoint):
+		if not isinstance(bapoint, BandAlignPoint):
+			raise TypeError("Argument must be a BandAlignPoint instance")
+		self.bapoints.append(bapoint)
+
+	def extend(self, other):
+		if isinstance(other, BandAlignData):
+			self.bapoints.extend(other.bapoints)
+		elif isinstance(other, list) and all(isinstance(p, BandAlignPoint) for p in other):
+			self.bapoints.extend(other)
+		else:
+			raise TypeError("Argument 'other' must be a BandAlignData instance or a list of BandAlignPoint instances")
+
+	def __contains__(self, xval):
+		return self.get(xval) is not None
+
+	def __iter__(self):
+		return iter(self.bapoints)
+
+	def __len__(self):
+		return len(self.bapoints)
+
+	def get_zero_energy(self, where = 0.0):
+		"""Get zero energy, i.e., an energy between bands with indices -1 and 1.
+
+		Arguments:
+		where   Vector instance or float/integer. Return the zero energy at this
+		        momentum or magnetic field value. Default: 0.0, meaning at zero.
+
+		Returns:
+		Float if argument where refers to a valid point. None otherwise.
+		"""
+		match = None
+		for p in self.bapoints:
+			if p.k == where:
+				match = p
+				break
+		if match is None:
+			return None
+		return match.get_zero_energy()
+
+	def fill(self, data, forward = True, dk1 = -2, dk2 = -1, component = None, **ba_kwds):
+		"""Fill the present BandAlignData instance with band indices.
+
+		Form the eigenvalues and band indices at points k + dk1 and k + dk2,
+		calculate the band indices at k. Iterate over k, as to fill in the band
+		indices for all points in the DiagData instance.
+
+		Arguments:
+		data       DiagData instance. This must be a one-dimensional momentum or
+		           magnetic field dependence. If the result of the
+		           diagonalization function is of higher dimension, an
+		           appropriate one-dimensional subset must be taken.
+		forward    True or False. Whether iterate forward (True) or backward
+		           (False).
+		dk1        -2, -1, 1, or 2. First source point for interpolation, in
+		           steps away from the point of consideration.
+		dk2        -2, -1, 1, or 2. Second source point for interpolation, in
+		           steps away from the point of consideration.
+		component  String or None. Vector component that is used as
+		           'interpolation variable'. For example, 'kx'.
+		**ba_kwds  Keyword arguments passed to align_energies().
+		"""
+		nk = len(data)
+		if not (dk1 in [-2, -1, 1, 2] and dk2 in [-2, -1, 1, 2] and dk1 != dk2):
+			raise ValueError("Arguments 'dk1' and 'dk2' must be two different values out of -2, -1, 1, and 2.")
+		kstart = 0 if forward else nk - 1
+		kend = nk if forward else -1
+		kstep = 1 if forward else -1
+
+		for k in range(kstart, kend, kstep):
+			x0 = data.get_xval(k)
+			if self.get(x0) is not None:
+				continue
+			k1 = k + kstep * dk1
+			k2 = k + kstep * dk2
+			if k1 >= 0 and k1 < nk:
+				x1 = data.get_xval(k1)
+				p1 = self.get(x1)
+			else:
+				p1 = None
+			if k2 >= 0 and k2 < nk:
+				x2 = data.get_xval(k2)
+				p2 = self.get(x2)
+			else:
+				p2 = None
+			if p1 is not None and p2 is not None:  # Inter-/Extrapolate from p1 and p2
+				p0 = p1.interpolate(p2, x0, component = component, gridvar = data.gridvar)
+				palign = p0.align_eival(np.sort(data[k].eival), **ba_kwds)
+				self.append(palign)
+			elif ((dk1 == -2 and dk2 == -1) or (dk1 == 2 and dk2 == 1)) and p1 is None and p2 is not None:  # Align to p2 only
+				palign = p2.align_eival(np.sort(data[k].eival), **ba_kwds)
+				palign.k = x0
+				self.append(palign)
+			elif ((dk1 == -1 and dk2 == -2) or (dk1 == 1 and dk2 == 2)) and p1 is not None and p2 is None:  # Align to p1 only
+				palign = p1.align_eival(np.sort(data[k].eival), **ba_kwds)
+				palign.k = x0
+				self.append(palign)
+
+	def apply_to(self, data, llindex = None, reset = False):
+		"""Copy band indices into DiagData instance.
+
+		Arguments:
+		data     DiagData instance.
+		llindex  Integer or None. If set, select the states with that LL index
+		         only.
+		reset    True or False. If True, copy None values to the DiagData array
+		         if the corresponding BandAlignPoint instances of the present
+		         BandAlignData instance are missing. If False, only overwrite
+		         non-None values.
+		"""
+		if not isinstance(data, DiagData):
+			raise TypeError("Argument 'data' must be a DiagData instance")
+		# Check if this instance refers to the same x values as data; if yes,
+		# use simple indexing, otherwise use self.get(). The latter is much
+		# slower for a large number of bapoints.
+		same_x = False
+		xval = data.get_xval()
+		if len(self.bapoints) == len(data):
+			same_x = all([p.k == x for p, x in zip(self.bapoints, xval)])
+		for j, ddp in enumerate(data):
+			p0 = self.bapoints[j] if same_x else self.get(xval[j])
+			if p0 is not None:
+				if llindex is None:
+					palign = p0.align_eival(ddp.eival, do_sort = False)
+					ddp.set_bindex(
+						palign.bindex(), aligned_with_e0 = palign.aligned_with_e0
+					)
+				else:
+					ddp_ll = ddp.select_llindex(llindex)
+					palign = p0.align_eival(ddp_ll.eival, do_sort = False)
+					ddp.set_bindex(
+						palign.bindex(), eival = palign.eival, llindex = llindex,
+						aligned_with_e0 = palign.aligned_with_e0
+					)
+			elif reset:
+				ddp.set_bindex(None)
+
def eival_e0_to_bandaligndata(eival, e0, x0=None, g0=0, e0_relax=False):
	"""Create a BandAlignData instance with a single BandAlignPoint from energy values

	Arguments:
	eival      Numpy array. The energy eigenvalues. Note that this function
	           tacitly sorts them.
	e0         Float or None. If a float, it is treated as the neutral energy,
	           where to anchor bands below and above the gap (band indices -1
	           and 1 if g0 = 0). If None, no zero energy is set.
	x0         Vector or float or None. The momentum or magnetic field value at
	           which to create the BandAlignPoint. If None, use Vector(0).
	g0         Integer or None. Gap index at energy e0; None is treated as 0.
	           The band indices below and above the gap are set to -1 and 1 if
	           g0 == 0, to g0 - 1 and g0 if g0 < 0, and to g0 and g0 + 1 if
	           g0 > 0.
	e0_relax   True or False. If False (default), require that both the
	           energies below and above the gap must be defined. If True,
	           allow one to be undefined.

	Returns:
	ba_data    BandAlignData instance with a single BandAlignPoint.
	"""
	anchor = Vector(0) if x0 is None else x0
	point = BandAlignPoint(anchor, np.sort(eival), 1)
	if e0 is not None:
		point.set_zero_energy(e0=e0, g0=g0 if g0 is not None else 0, relax=e0_relax)
	return BandAlignData([point])
+
+
def align_energies(e1, e2, align_exp = 4, ndelta_weight = 20.):
	"""Align two energy arrays.
	This is the 'engine' of the band alignment algorithm. Basically, it tries to
	minimize the function avg(Delta E^e), where avg(Delta E) is the average of
	the energy differences between the two input arrays and e is align_exp (if
	it is numeric). We add an extra 'penalty' inversely proportional to the
	number of matched values (ndeltas), in order to prioritize solutions with as
	many matching values as possible. Thus, the expression
	  [sum_i |E1_{i} - E2_{i+j}|^e] / ndeltas(j) + ndelta_weight / ndeltas(j)
	is minimized over j.

	Arguments:
	e1             Array of floats. First set of eigenvalues.
	e2             Array of floats. Second set of eigenvalues.
	align_exp      Float/integer or 'max'. Exponent e of the minimization
	               function, see above. If 'max', the minimization function is
	               max(|Delta E|) instead. This value comes from the
	               configuration setting 'band_align_exp'.
	ndelta_weight  Float. Multiplication factor for the penalty for difference
	               in number of eigenvalues. This value comes from the
	               configuration setting 'band_align_ndelta_weight'.

	Note:
	The arrays e1 and e2 must be sorted in ascending order, otherwise a
	ValueError is raised.

	Returns:
	alignment  Integer. The shift in index in order to align the two arrays e1
	           and e2.
	e1a, e2a   Two arrays of the same size. Subsets of e1 and e2, respectively,
	           with the aligned values. These are defined such that e1a[i]
	           aligns with e2a[i] for all i.
	score      Float. Value that indicates the 'quality' of the alignment. Lower
	           values mean better alignment. None if there is only one possible
	           shift (n1 + n2 - 1 < 2).

	Examples:
	align_energies([4,5], [0,1,2,3,4,5,6])  yields   4, [4,5], [4,5], (score)
	align_energies([0,1,2,3,4,5,6], [4,5])  yields  -4, [4,5], [4,5], (score)
	"""
	n1 = len(e1)
	n2 = len(e2)
	if (n1 > 1 and np.any(np.diff(e1) < 0.0)) or (n2 > 1 and np.any(np.diff(e2) < 0.0)):
		raise ValueError("Input arrays must be sorted in ascending order")

	# Pad e2 with NaN on both sides, so that every shift j of e1 along e2
	# corresponds to a window of fixed length n1; NaN marks 'no counterpart'.
	e2_padded = np.concatenate((np.full(n1 - 1, np.nan), e2, np.full(n1 - 1, np.nan)))
	# Build the matrix of energy differences only once (row j = shift j);
	# previously this matrix was recomputed for deltas and ndeltas separately.
	diff = np.array([e2_padded[j:j + n1] for j in range(0, n1 + n2 - 1)]) - np.asarray(e1)
	if align_exp == "max":
		deltas = np.nanmax(np.abs(diff), axis=1)
	else:
		deltas = np.nansum(np.abs(diff)**align_exp, axis=1)
	# Number of actually matched (non-NaN) values per shift; always >= 1
	ndeltas = np.count_nonzero(~np.isnan(diff), axis=1)
	alignment = np.argmin(deltas / ndeltas + ndelta_weight / ndeltas) - (n1 - 1)
	e1a = e1[max(0, -alignment):min(n1, n2 - alignment)]
	e2a = e2[max(0, alignment):min(n2, n1 + alignment)]
	if n1 + n2 - 1 < 2:
		score = None
	else:
		# Score: ratio (log10) between the two best normalized deltas
		deltas_sorted = np.sort(deltas / ndeltas)
		with np.errstate(divide = 'ignore'):  # do not raise a warning on division by zero
			score = np.log10(np.divide(deltas_sorted[1], deltas_sorted[0]))
	return alignment, e1a, e2a, score
+
+### BAND ALIGN ###
+# The diagonalization is always partial (unless in bulk mode) which creates the
+# problem of not knowing how the set of eigenvalues between two neighbouring
+# k or B values align.
+# The function bandalign first tries to align the first two sets of
+# eigenvalues by minimizing the square differences, which is done by the
# function align_energies(). For subsequent points, the least-squares-difference
+# algorithm is applied to a linear prediction from the two previous points and
+# the new set of eigenvalues.
+# The most typical mode of failure is basically an out-of-range error: The
+# least-squares method may misalign the eigenvalues; in that case, the gap will
+# no longer be in the set of eigenvalues under consideration, and then the
+# method fails. This typically is the case when there are no larger gaps, e.g.,
+# if targetenergy and neig are such that only valence-band states are returned.
+# At this moment (TODO), this can be solved only by calculating also the lowest
+# conduction band states.
+# NOTE: The function previously called continuousgap() still exists with
+# unchanged calling signature. It calls bandalign() and returns the gap
+# energies.
+# NOTE: The 'new' version bandalign() now returns the band indices of the lowest
+# and highest eigenvalues. The 'old' version (formerly called continuousgap())
+# returned the array indices of the bands below and above the gap, and the
+# aforementioned gap energy.
+
def bandalign(data, component = None, ba_data = None, **ba_kwds):
	"""Do band alignment, i.e., generate BandAlignData instance based on DiagData.

	Arguments:
	data       DiagData instance. This must be a one-dimensional momentum or
	           magnetic field dependence. If the result of the diagonalization
	           function is of higher dimension, an appropriate one-dimensional
	           subset must be taken.
	component  String or None. Vector component that is used as 'variable'. For
	           example, 'kx'.
	ba_data    BandAlignData instance or None. If set, use the available band
	           indices and only fill in the missing ones. If None or empty, a
	           new BandAlignData instance is initialized from data.
	**ba_kwds  Keyword arguments passed to align_energies().

	Returns:
	ba_data    The updated BandAlignData instance. None if data is empty or
	           the alignment has failed.
	"""
	if len(data) == 0:
		return None
	if ba_data is None or len(ba_data) == 0:
		ba_data = BandAlignData(data)
	# Note: None also fails the isinstance test, so no separate None check
	if not isinstance(ba_data, BandAlignData):
		raise TypeError("ba_data is expected to be a nonempty BandAlignData instance")
	if len(ba_data) == 0:
		sys.stderr.write("ERROR (bandalign): Band alignment has failed.\n")
		return None

	# Three passes: interpolate (x1 ... __ ... x2), ...
	ba_data.fill(data, forward = True, dk1 = -1, dk2 = 1, component = component, **ba_kwds)
	# ... extrapolate forward (x1 ... x2 ... __), ...
	ba_data.fill(data, forward = True, dk1 = -2, dk2 = -1, component = component, **ba_kwds)
	# ... and extrapolate backward (__ ... x2 ... x1)
	ba_data.fill(data, forward = False, dk1 = -2, dk2 = -1, component = component, **ba_kwds)
	return ba_data
+
+ALL = slice(None)
def bandalign2d(data, ba_data, components=None):
	"""Do band alignment on a two-dimensional cartesian or polar grid
	Band alignment is done first along one direction, then the perpendicular
	direction, in a "fishbone" pattern. This can be used for cartesian as well
	as polar coordinates.

	Arguments:
	data        DiagData instance
	ba_data     BandAlignData instance. Should contain the base point with the
	            respective eigenvalues and band indices.
	components  2-tuple or None. The vector components. If None, take them
	            from the grid of data.

	Returns:
	ba_data     BandAlignData instance. Contains the eigenvalues and band
	            indices for all points in data. None if the zero point is
	            missing.
	"""
	if components is None:
		components = [None, None] if data.grid is None else data.grid.var
	xsize, ysize = data.shape

	# Locate the zero point; if that fails, retry while ignoring the
	# magnetic-field (parameter) value, with a warning.
	_, izero = data.get_zero_point(return_index=True)
	if izero is None:
		_, izero = data.get_zero_point(return_index=True, ignore_paramval=True)
		if izero is None:
			sys.stderr.write("ERROR (bandalign2d): Zero point could not be found.\n")
			return None
		sys.stderr.write("Warning (bandalign2d): Zero point could not be found. Retrying by ignoring magnetic field, but results may be unreliable.\n")

	# Align along the x direction for y = 0 (cartesian) or along the r
	# direction (polar): the 'spine' of the fishbone
	iy_zero = izero % ysize
	ba_data = bandalign(data.get_subset((ALL, iy_zero)), component=components[0], ba_data=ba_data)

	# Then along the perpendicular direction at every x: the 'ribs',
	# anchored at the values determined along the spine
	for ix in range(0, xsize):
		ba_data = bandalign(data.get_subset((ix, ALL)), component=components[1], ba_data=ba_data)

	return ba_data
+
def bandalign_bulk(data, params = None):
	"""Do band alignment on a three-dimensional grid
	For a bulk calculation, the indices are just determined from the orbitals
	and applied uniformly to all data points. This function is not suitable for
	bulk LL mode.

	Arguments:
	data        DiagData instance. Every point must have exactly norbitals
	            eigenvalues.
	params      PhysParams instance. The number of orbitals is taken from here.
	            Required; the default None raises a ValueError.

	Returns:
	ba_data     BandAlignData instance. Contains the eigenvalues and band
	            indices for all points in data. None if a coordinate value is
	            missing.

	Raises:
	ValueError  If params is missing, the eigenvalue count does not match the
	            orbital count, or the orbital count is not 6 or 8.
	"""
	# Explicit check; previously params=None fell through to an AttributeError
	if params is None:
		raise ValueError("Argument 'params' must be a PhysParams instance")
	if any(d.eival.shape[0] != params.norbitals for d in data):
		raise ValueError("Number of eigenvalues not equal to number of orbitals")
	# The band index range is fixed by the orbital count; 0 is always excluded
	if params.norbitals == 8:
		bmin, bmax = -6, 2
	elif params.norbitals == 6:
		bmin, bmax = -4, 2
	else:
		raise ValueError("Number of orbitals must be 6 or 8.")
	xval = data.get_xval()
	if any(x is None for x in xval):
		# Error label fixed: this function is bandalign_bulk (the message
		# previously referred to bandindices_worker)
		sys.stderr.write("ERROR (bandalign_bulk): Missing coordinate value.\n")
		return None
	ba_points = [
		BandAlignPoint(x, d.eival, bmin, bmax, aligned_with_e0=True)
		for x, d in zip(xval, data)
	]
	return BandAlignData(ba_points)
+
def continuousgap(data):
	"""Get energies inside gap 0 (charge neutrality gap)

	Arguments:
	data    DiagData instance which contains DiagDataPoints with band indices
	        defined (ddp.bindex is not None).

	Returns:
	e_gap   List of floats. The energies in the middle of the gap, as function
	        of the grid variable. None if the gap cannot be located at some
	        point.
	"""
	e_gap = []
	if any([d.bindex is None for d in data]):
		# Band indices not (fully) available: run band alignment first
		ba_data = bandalign(data)
		if ba_data is None:
			return None
		b_min = [bap.bmin for bap in ba_data]
		b_max = [bap.bmax for bap in ba_data]
		for b_lo, b_hi, d in zip(b_min, b_max, data):
			n = len(d.eival)
			eival = np.sort(d.eival)
			if b_lo < -n or b_lo > 1:
				# Band index range out of bounds; gap not in the data
				return None
			elif b_hi > n or b_hi < -1:
				return None
			elif b_lo == 0 or b_hi == 0:
				# Band index 0 must never occur
				raise ValueError("Illegal band index value")
			elif b_lo == 1:
				# All states above the gap; take a value just below the lowest
				e_gap.append(eival[0] - 0.001)
			elif b_hi == -1:
				# All states below the gap; take a value just above the highest
				e_gap.append(eival[-1] + 0.001)
			elif b_lo < 0 and b_hi > 0:
				# Gap straddled: -b_lo states lie below the gap, so the gap is
				# between eival[-b_lo - 1] and eival[-b_lo]
				b = -b_lo - 1
				e_gap.append((eival[b] + eival[b+1]) / 2.)
			else:
				raise ValueError("Illegal band index value")
	else:
		# Band indices available: use the eigenvalues at band indices -1
		# (below gap) and 1 (above gap) directly
		for d in data:
			e_below = d.get_eival((-1,))
			e_above = d.get_eival((1,))
			if e_below is None and e_above is None:
				return None
			elif e_above is None:
				e_gap.append(e_below + 0.001)
			elif e_below is None:
				e_gap.append(e_above - 0.001)
			else:
				e_gap.append((e_below + e_above) / 2.)
	return e_gap
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandalign/csvi.py b/kdotpy-v1.0.0/src/kdotpy/bandalign/csvi.py
new file mode 100644
index 0000000000000000000000000000000000000000..166ba0d7ef53b9abef55bae17f34c899f7e70842
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandalign/csvi.py
@@ -0,0 +1,255 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import re
+
+from ..momentum import vector_from_attr
+from ..tableo import read_dict as tableo_read_dict
+from .base import BandAlignPoint, BandAlignData
+
+
+### HELPER FUNCTIONS ###
+
def str_to_bandkey(key):
	"""Parse string input read from csv file as band index (integer or tuple)

	Arguments:
	key   Integer, string, or tuple. Strings may be a plain integer ('-3') or
	      a comma-separated sequence, optionally parenthesized ('(1, 2)').
	      Tuples are parsed element-wise.

	Returns:
	Integer, tuple of integers, or None if the input cannot be parsed.
	"""
	if isinstance(key, int):
		return key
	if isinstance(key, str):
		if re.fullmatch(r'[-+]?\d+', key) is not None:
			return int(key)
		m = re.fullmatch(r'\(?(([-+]?\d+)(,\s*[-+]?\d+)+)\)?', key)
		return None if m is None else tuple(int(s) for s in m.group(1).split(','))
	if isinstance(key, tuple):
		int_parts = []
		for element in key:
			parsed = str_to_bandkey(element)
			if isinstance(parsed, int):
				int_parts.append(parsed)
			elif parsed is not None:
				return parsed  # a nested tuple result takes precedence
		if len(int_parts) == 1:
			return int_parts[0]  # single integer
		if len(int_parts) > 1:
			return tuple(int_parts)  # multiple integers
	return None
+
def bindex_test_zero(bindex):
	"""Test if a list of band index values does not contain 0

	A value counts as zero if it equals the integer 0 or if it is a tuple
	whose last element (the band index proper) is 0.

	Returns:
	True if no zero band index is present, False otherwise.
	"""
	if 0 in bindex:
		return False
	return not any(isinstance(b, tuple) and b[-1] == 0 for b in bindex)
+
def bindex_test_continuity(bindex):
	"""Test whether the list of band indices is contiguous

	Every integer between min and max must be present, except 0, which is
	always skipped. An empty list counts as contiguous.
	"""
	if len(bindex) == 0:
		return True
	present = set(bindex)
	return all(b in present for b in range(min(bindex), max(bindex) + 1) if b != 0)
+
def _warn_invalid_points(what, points):
	"""Write a warning about x values that failed a validity check.

	Depending on the number of affected points, one of three message formats
	is written to stderr; nothing is written for an empty list.

	Arguments:
	what    String. Short description of the problem, e.g. 'Non-contiguous
	        data'.
	points  List. The x values at which the problem occurred.
	"""
	if len(points) == 1:
		sys.stderr.write("Warning (parse_bandaligndata): %s for x = %s (x = k, b)\n" % (what, points[0]))
	elif len(points) == 2:
		sys.stderr.write("Warning (parse_bandaligndata): %s for x = %s and %s (x = k, b)\n" % (what, points[0], points[1]))
	elif len(points) > 2:
		sys.stderr.write("Warning (parse_bandaligndata): %s for x = %s, ..., %s (at %i points; x = k, b)\n" % (what, points[0], points[-1], len(points)))

def parse_bandaligndata(xvalues, bindex, odata):
	"""Interpret and check band data extracted from csv file

	Arguments:
	xvalues    Array or list. The x values.
	bindex     Array or list or integers. The band indices. For LLs, this must
	           be the integer band index only (last element of the tuple).
	odata      Array. The energy values per band. The shape should be identical
	           to (len(xvalues), len(bindex)). NaN marks a missing value.

	Returns:
	bandaligndata   A BandAlignData instance. Points that fail the continuity
	                or monotonicity check are omitted (with a warning).
	"""
	bandaligndata = []
	non_contiguous = []
	non_increasing = []
	for x, z in zip(xvalues, odata):
		nanval = np.isnan(z)
		bidx = np.asarray(bindex)[~nanval]
		if len(bidx) == 0:
			continue
		bmin, bmax = min(bidx), max(bidx)
		# Expected count for a contiguous range bmin..bmax; 0 is excluded,
		# so ranges straddling zero contain one value fewer
		n_bidx = bmax - bmin + (0 if bmax > 0 and bmin < 0 else 1)
		if n_bidx != len(bidx):  # Check if data does not contain 'holes'
			non_contiguous.append(x)
			continue
		z_regular = z[~nanval]
		if len(z_regular) > 1 and np.min(np.diff(z_regular)) < 0.0:  # Check if values are increasing
			non_increasing.append(x)
			continue
		bandaligndata.append(BandAlignPoint(x, z_regular, bmin, bmax))

	# Show warnings if data failed to pass the check
	_warn_invalid_points("Non-contiguous data", non_contiguous)
	_warn_invalid_points("Non-increasing energies", non_increasing)
	return BandAlignData(bandaligndata)
+
+### BAND INDICES FROM FILE ###
+
def bandindices_from_file(filename, obs = 'E'):
	"""Get band alignment data from a CSV 'byband' file.

	The CSV files should have columns referring to momentum or magnetic field
	values and columns whose headers are labelled by the band indices
	(integers). The data need not be complete for this function to work; for
	example a few aligned bands can be enough information to allow
	bandindices_worker() to fill in the remaining ones.

	Example input file (formatted as a table):
	kx   | E     | E     | E     | E     | E     | E
	     | -2    | -1    | 1     | 2     | 3     | 4
	0    |       |       | -10.3 | -10.1 |  2.5  |  2.7
	0.05 |       |       |  -6.1 |  -5.9 | 10.2  | 10.4
	0.1  | -25.4 | -25.2 |  -2.2 |  -2.0 |       |
	0.15 | -26.3 | -26.1 |   4.5 |   4.7 |       |

	Arguments:
	filename  String. File name of the file to be imported.
	obs       String. Observable which to align. Only 'E' makes sense.

	Returns:
	BandAlignData instance with the data present in the file; for LL-resolved
	input (tuple band labels), a dict of BandAlignData instances keyed by LL
	index. An empty dict for an empty file; None on failure.
	"""
	# Read file and return columns as dict
	data = tableo_read_dict(filename)
	if data is None:
		return None
	if data == {}:
		return {}

	# Look for columns with x values (momentum k or magnetic field b)
	xkeys = {}
	re_xkey = re.compile(r"[kba](r|x|y|z|theta|phi)?")
	for key in data:
		if isinstance(key, str) and re_xkey.fullmatch(key):
			xkeys[key] = key
		elif isinstance(key, tuple):
			for k in key:
				if re_xkey.fullmatch(k) is not None:
					xkeys[k] = key
					break
	if len(xkeys) == 0:
		return None
	xkeycomp = sorted(xkeys)
	# Common prefix determines whether x is momentum (k), field (b), or angle (a)
	xkeyprefix = ''
	for pf in ['k', 'b', 'a']:
		if all(k.startswith(pf) for k in xkeycomp):
			xkeyprefix = pf
			break
	try:
		xkeydata = np.array([data[xkeys[k]] for k in sorted(xkeys)], dtype = float).transpose()
	except (ValueError, TypeError):  # non-numeric x data; was a bare except
		return None

	# Convert x values to Vector data
	xvalues = []
	for vec in xkeydata:
		attr = {c: val for c, val in zip(xkeycomp, vec)}
		xvalues.append(vector_from_attr(attr, prefix = xkeyprefix))

	# Parse column headers and extract band indices
	bandkeys = {}
	for key in data:
		bkey = str_to_bandkey(key)
		if bkey is not None:
			bandkeys[bkey] = key
	if len(bandkeys) == 0:
		sys.stderr.write("ERROR (bandalign_from_file): No band data found. This can also happen when the observable is specified and does not match.\n")
		return None
	if len(set(type(k) for k in bandkeys.keys())) > 1:
		sys.stderr.write("ERROR (bandalign_from_file): Band indices must be all integers or all tuples, but types may not be mixed.\n")
		return None
	bindex = list(bandkeys.keys())
	if not bindex_test_zero(bindex):
		sys.stderr.write("ERROR (bandalign_from_file): Band index 0 is not permitted.\n")
		return None
	if isinstance(bindex[0], tuple):  # equivalent to: all elements tuple
		# Group band indices by LL index
		bindex_ll = {}
		for ll, b in bindex:
			bindex_ll.setdefault(ll, []).append(b)
		# Continuity test
		bindex_err = [ll for ll in bindex_ll if not bindex_test_continuity(bindex_ll[ll])]
		# Fixed: warn also for a single affected LL (condition was > 1), and
		# convert the integer LL indices to str before joining (str.join()
		# raises TypeError on integers)
		if len(bindex_err) > 0:
			sys.stderr.write("Warning (bandalign_from_file): Non-contiguous band data for LL " + (", ".join(str(ll) for ll in sorted(bindex_err))) + ".\n")
	else:
		if not bindex_test_continuity(bindex):
			sys.stderr.write("ERROR (bandalign_from_file): Band indices do not form a contiguous range.\n")
		bindex = np.array(bindex, dtype = int)

	# Extract band data; empty cells become NaN
	try:
		odata = np.array([[float("nan") if x == "" else float(x) for x in data[bandkeys[k]]] for k in bindex], dtype = float).transpose()
	except (ValueError, TypeError):  # was a bare except
		sys.stderr.write("ERROR (bandalign_from_file): Data file contains non-numeric data.\n")
		return None

	# Interpret and check band data
	if isinstance(bindex[0], tuple):
		llidx = np.asarray(bindex)[:, 0]  # LL indices
		bidx = np.asarray(bindex)[:, 1]  # band indices
		bandaligndata = {}
		for ll in bindex_ll:
			llsel = (llidx == ll)
			bandaligndata[ll] = parse_bandaligndata(xvalues, bidx[llsel], odata[:, llsel])
	else:
		bandaligndata = parse_bandaligndata(xvalues, bindex, odata)
	return bandaligndata
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/bandtools.py b/kdotpy-v1.0.0/src/kdotpy/bandtools.py
new file mode 100644
index 0000000000000000000000000000000000000000..b370478cff4f8d194cad3f169a1ca8e2e4758bcd
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bandtools.py
@@ -0,0 +1,666 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import re
+from .config import get_config_num, get_config_bool
+from scipy.linalg import svd
+from .physconst import hbar
+
+### OVERLAP EIGENVECTORS ###
+def overlap_eivec_labels(obs, prefix = 'subband'):
+	"""Extract labels from observable string of the form subbandxyxy...
+
+	Arguments:
+	obs      String. The observable string of the form subbandxyxyxy, where xy
+	         can be either a character label (e.g., E1+, E1-, or E1) or a signed
+	         band index, preceded by B (e.g. B+1, B-12) or in parentheses. The
+	         labels xy are case insensitive.
+	prefix   String. The substring that starts the observable string. Default is
+	         'subband'.
+
+	Returns:
+	ov_labels  List of strings (band characters) or 1-tuples (band indices). The
+	           elements can be used as arguments to DiagDataPoint.get_index() to
+	           extract the eigenvector, for example.
+	"""
+	if obs is None:
+		return []
+	if not isinstance(obs, str):
+		raise TypeError("Argument obs must be string or None.")
+	if not obs.startswith(prefix):
+		return []
+	obs1 = obs[len(prefix):]
+	ov_labels = []
+	matched = [False for _ in obs1]
+	for m in re.finditer(r'([ELH][1-9][0-9]*[+-]?|B[+-][1-9][0-9]*|\([+-][1-9][0-9]*\))', obs1.upper()):
+		lb = m.group(0)
+		for i in range(m.start(), m.end()):
+			matched[i] = True
+		if lb.startswith('B'):
+			ov_labels.append((int(lb[1:]),))
+		elif lb.startswith('(') and lb.endswith(')'):
+			ov_labels.append((int(lb[1:-1]),))
+		elif lb[-1] in ['+', '-']:
+			ov_labels.append(lb)
+		else:
+			ov_labels.extend([lb + '+', lb + '-'])
+	if not all(matched):
+		nonmatched = "".join(['.' if m else c for m, c in zip(matched, obs1)])
+		sys.stderr.write("Warning (overlap_eivec_labels): The following part of the input does not correspond to a valid band label: '%s%s'.\n" % (prefix, nonmatched))
+	return ov_labels
+
+def is_bandpair(lb1, lb2):
+	"""For two subband labels, indicate if the bands could be a pair
+
+	Arguments:
+	lb1, lb2   String or 1-tuple.
+
+	Returns:
+	True or False.
+	"""
+	if isinstance(lb1, tuple) and isinstance(lb2, tuple):
+		return True  # TODO: Only subsequent values?
+	if isinstance(lb1, str) and isinstance(lb2, str):
+		if lb1.endswith('+') and lb2.endswith('-'):
+			return lb1[:-1] == lb2[:-1]
+		if lb1.endswith('-') and lb2.endswith('+'):
+			return lb1[:-1] == lb2[:-1]
+	return False
+
+def subband_labels_from_obsids(obsids):
+	"""Extract all possible subband labels from list of observables"""
+	subband_labels = []
+	for o in obsids:
+		m = re.match(r"([ELH][1-9][0-9]*[+-]?|B[+-][1-9][0-9]*|\([+-][1-9][0-9]*\))", o)
+		if m is not None:
+			subband_labels.append(m.group(0))
+	return subband_labels
+
+def get_overlap_eivec(data, bandtypes = None, obs = None):
+	"""Get eigenvectors for overlap observables (subband character)
+
+	Arguments:
+	data       DiagDataPoint.
+	bandtypes  List of strings or None. Select the bands with these character
+	           labels (like 'E1+', 'H2-'). If None, use all bands.
+	obs        String or None. Observable id. If set to 'subbandxyxyxy...',
+	           where xy are labels like 'e1', 'h2', calculate the overlaps
+	           for the subbands corresponding to these labels, if not already
+	           done so. If None or a string that does not follow this pattern,
+	           do not perform any additional action.
+
+	Returns:
+	A dict instance, whose keys are the character labels and whose values are
+	the eigenvectors (as numpy arrays).
+	"""
+	try:
+		data.char
+	except:
+		raise TypeError("Input data must be a DiagDataPoint instance")
+	if data.char is None:
+		sys.stderr.write("Warning (get_overlap_eivec): No band characters present in data\n")
+		return None
+	if data.eivec is None:
+		sys.stderr.write("Warning (get_overlap_eivec): No eigenvectors present in data\n")
+		return None
+
+	if bandtypes is None:
+		bandtypes = data.char
+
+	ov_labels = overlap_eivec_labels(obs)
+	ov_labels.extend(bandtypes)
+	## Add bandtypes argument at the end, so that the argument obs is
+	## prioritized. The construct below prevents duplicates. The return value
+	## is a (in principle unordered) dict instance. The output is sorted by the
+	## dict keys elsewhere.
+
+	overlap_eivec = {}
+	all_indices = []
+	for lb in ov_labels:
+		try:
+			idx = data.get_index(lb)
+		except:
+			idx = None
+		if idx is not None and idx not in all_indices:  # Prevent duplicates
+			all_indices.append(idx)
+			lbstr = "(%+i)" % lb if isinstance(lb, tuple) and len(lb) == 1 else str(lb)
+			overlap_eivec[lbstr] = data.eivec.T[idx]
+	if len(overlap_eivec) == 0:
+		overlap_eivec = None
+	return overlap_eivec
+
+### BAND CHARACTERS ###
+
+
+def wf_countnodes(eivec, step = 1, threshold = 0.0):
+	"""Count number of nodes (zeroes) in each component (real OR imag part) of a wave function. Legacy method.
+	This is used to determine the band character label.
+
+	Arguments:
+	eivec      Numpy array with real values. The real or imaginary part of the
+	           wave function psi(z).
+	step       Integer. The 'coarseness' of the sign comparison, i.e., the
+	           amount of lattice points dz in the test psi(z) < 0 < psi(z + dz).
+	threshold  Float. If nonzero, count a sign flip as zero only if the value
+	           psi(z + dz) exceeds this value. For example, if psi(z) < 0, only
+	           count a zero if psi(z + dz) > threshold.
+
+	Returns:
+	Integer. Number of nodes.
+	"""
+	n = len(eivec)
+	nnodes = 0
+	if step > 1:
+		sign1 = np.sign(np.sum(eivec[0:step]))
+		for j in range(step, n, step):
+			sign2 = np.sign(np.sum(eivec[j:max(n, j+step)]))
+			if sign1 * sign2 < 0:
+				nnodes += 1
+			sign1 = sign2
+	else:
+		sign1 = np.sign(eivec[0])
+		for j in range(1, n):
+			sign2 = np.sign(eivec[j])
+			if sign1 * sign2 < 0 and (np.abs(eivec[j] - eivec[j-1]) >= threshold):
+				nnodes += 1
+			sign1 = sign2
+	return nnodes
+
+def wf_countnodes_minmax(eivec, threshold = 0.0):
+	"""Count number of nodes (zeroes) in each component (real OR imag part) of a wave function. Version using local minima and maxima.
+	This is used to determine the band character label.
+
+	Arguments:
+	eivec      Numpy array with real values. The real or imaginary part of the
+	           wave function psi(z).
+	threshold  Float. If nonzero, count a sign flip as zero only if the value
+	           psi(z) exceeds this value.
+
+	Returns:
+	Integer. Number of nodes.
+	"""
+	# Calculate first derivative. Add zeros to both ends of the array to force
+	# the wavefunctions to zero at the outer interfaces.
+	d_eivec = np.diff(eivec, prepend = 0.0, append = 0.0)
+	# Get local minima and maxima by checking for sign flip in the first derivative.
+	eivecminmax = np.diff(np.sign(d_eivec)) != 0
+	# Keep only extremal values whose magnitude exceeds the threshold, to
+	# suppress spurious extrema caused by numerical noise.
+	minmaxval = eivec[eivecminmax & (np.abs(eivec) > threshold)]
+	# Count sign flips between consecutive extrema; each sign flip corresponds
+	# to one node (zero crossing) of the wave function.
+	nnodes = np.count_nonzero(np.sign(minmaxval[1:]) * np.sign(minmaxval[:-1]) < 0)
+	return nnodes
+
+def band_types(params, eivecs, warning_level = 0, k = None, b = None):
+	"""Wrapper around band_type that takes care of the warning messages.
+
+	Arguments:
+	params         PhysParams instance. Used for nz, ny, zres, and norbitals.
+	eivecs         Numpy array. The eigenvectors as obtained from the
+	               diagonalization; each column is one eigenvector.
+	warning_level  0, 1, 2. Whether to show no, some, or all warnings.
+	k, b           Vector or None. Extra arguments used to determine whether
+	               nonzero momentum or magnetic field may be cause of failure.
+
+	Returns:
+	bandtypes      List of strings. The (sub)band characters, one entry per
+	               eigenvector.
+	"""
+	# Tally of how often each warning type occurred over all eigenvectors
+	all_warnings = {'indef_arg': 0, 'diff_re_im': 0, 'general': 0}
+	# At warning_level < 2, band_type() itself stays silent; the aggregated
+	# messages below are printed instead (for warning_level >= 1).
+	suppress_warning = (warning_level < 2)
+	bandtypes = []
+	for eivec in eivecs.T:
+		bt, warnings = band_type(params, eivec, suppress_warning = suppress_warning)
+		bandtypes.append(bt)
+		for w in warnings:
+			if warnings[w]:
+				all_warnings[w] += 1
+	if warning_level > 0:
+		if all_warnings['indef_arg'] > 0:
+			sys.stderr.write("Warning (band_types): Wave function component with indefinite complex argument for %i eigenstates.\n" % all_warnings['indef_arg'])
+		if all_warnings['diff_re_im'] > 0:
+			sys.stderr.write("Warning (band_types): Real and imaginary part with unequal number of nodes for %i eigenstates.\n" % all_warnings['diff_re_im'])
+			sys.stderr.write("This can happen if the wave function is suppressed in some regions, e.g. due to an electrostatic potential. Try to adjust the 'band_char_node_threshold' configuration value.\n")
+		if all_warnings['general'] > 0:
+			sys.stderr.write("Warning (band_type): Unable to determine band character and/or number of nodes for %i eigenstates.\n" % all_warnings['general'])
+			possible_causes = ["spin degeneracy not broken", "nonzero potential"]
+			if params.ny > 1:  # if y dimension is larger than 1
+				possible_causes.append("one-dimensional geometry")
+			if k is not None and abs(k) >= 1e-6:
+				possible_causes.append("nonzero momentum (k != 0)")
+			if b is not None and abs(b) >= 1e-6:
+				possible_causes.append("nonzero magnetic field (B != 0)")
+			sys.stderr.write("Possible causes: " + ", ".join(possible_causes) + ", etc.\n")
+	return bandtypes
+
+def band_type(params, eivec, suppress_warning = False):
+	"""Given an eigenvector, determine its (sub)band character label.
+
+	Note:
+	This should ideally be done at zero momentum and zero magnetic field only.
+	It can be done elsewhere, but that is not always reliable.
+
+	Arguments:
+	params             PhysParams instance. Used for nz, ny, zres, and
+	                   norbitals.
+	eivec              Numpy array. A single eigenvector as obtained from the
+	                   diagonalization.
+	suppress_warning   True or False. If True, hide warnings if determining
+	                   characters fails, for example for k != 0. If False, show
+	                   these warnings.
+
+	Returns:
+	bandchar   String. The (sub)band character, e.g. 'E1+', 'H2-', 'S1+', or
+	           '??' if the character could not be determined.
+	warnings   Dict with boolean values for the keys 'indef_arg', 'diff_re_im',
+	           and 'general', indicating which warnings were triggered.
+	"""
+	nz = params.nz
+	ny = params.ny
+	dz = params.zres
+	norb = params.norbitals
+	# nodes[b] is the node count for orbital component b; None if the orbital
+	# has negligible weight or the count is ill-defined.
+	nodes = []
+	orbital_threshold = get_config_num('band_char_orbital_threshold', minval = 0.0)
+	node_threshold = get_config_num('band_char_node_threshold', minval = 0.0)
+	using_minmax = get_config_bool('band_char_use_minmax')
+	make_real = get_config_bool('band_char_make_real')
+	warnings = {'indef_arg': False, 'diff_re_im': False, 'general': False}
+
+	if eivec.shape[0] == norb * ny * nz:		# for 1D
+		# Sum out the y coordinate to obtain an effective psi(z) per orbital
+		eivec0 = np.reshape(eivec, (ny, norb * nz))
+		eivec = np.sum(eivec0, axis = 0)
+	elif eivec.shape[0] == norb * nz:			# for 2D
+		pass
+	else:
+		raise ValueError("Eigenvectors have incorrect number of components")
+
+	for b in range(0, norb):
+		# psi is the z-dependent wave function of orbital component b
+		psi = eivec[b::norb]
+		psi2 = np.vdot(psi, psi)
+		if psi2 > orbital_threshold:
+			if make_real:
+				# Rotate the global phase such that the largest component
+				# becomes real
+				jmax = np.argmax(np.abs(psi))
+				phase = psi[jmax] / abs(psi[jmax])
+				psi /= phase
+
+			# Count nodes separately in real and imaginary part
+			if using_minmax:
+				realnodes = wf_countnodes_minmax(np.real(psi), node_threshold * dz)
+				imagnodes = wf_countnodes_minmax(np.imag(psi), node_threshold * dz)
+			else:
+				realnodes = wf_countnodes(np.real(psi), 1, node_threshold * dz)
+				imagnodes = wf_countnodes(np.imag(psi), 1, node_threshold * dz)
+			max_re = np.amax(np.abs(np.real(psi)))
+			max_im = np.amax(np.abs(np.imag(psi)))
+
+			if make_real and max_im >= 1e-6:
+				warnings['indef_arg'] = True
+				if not suppress_warning:
+					sys.stderr.write("Warning (band_type): Wave function component with indefinite complex argument.\n")
+
+			# Accept the node count if real and imaginary parts agree, or if
+			# one of the two parts is negligible; otherwise it is ill-defined.
+			if realnodes == imagnodes:
+				nodes.append(realnodes)
+			elif max_re < 1e-6 and max_im >= 1e-6:
+				nodes.append(imagnodes)
+			elif max_re >= 1e-6 and max_im < 1e-6:
+				nodes.append(realnodes)
+			else:
+				warnings['diff_re_im'] = True
+				if not suppress_warning:
+					sys.stderr.write("Warning (band_type): Real and imaginary part of the wave function do not have the same number of nodes (%d, %d).\n" % (realnodes, imagnodes))
+				nodes.append(None)
+		else:
+			nodes.append(None)
+
+	# Classify by which orbital components carry weight. The orbital index
+	# pairs used below (0/3, 1/4, 2/5, 6/7) presumably correspond to the two
+	# spin orientations of the electron/light-hole, heavy-hole, and Gamma7
+	# orbitals of the 8-orbital basis -- TODO confirm against the basis
+	# ordering defined elsewhere in the project.
+	if norb == 8 and nodes[6] is not None and (nodes[0] is None or nodes[6] < nodes[0]) and (nodes[3] is None or nodes[6] < nodes[3]):  # mostly a hypothetical situation
+		bandchar = 'S%i+' % (nodes[6] + 1)
+	elif norb == 8 and nodes[7] is not None and (nodes[1] is None or nodes[7] < nodes[1]) and (nodes[4] is None or nodes[7] < nodes[4]):  # mostly a hypothetical situation
+		bandchar = 'S%i-' % (nodes[7] + 1)
+	elif nodes[0] is not None and nodes[3] is not None and (nodes[1] is None and nodes[2] is None and nodes[4] is None and nodes[5] is None):
+		bandchar = 'E' if nodes[0] < nodes[3] else 'L'
+		bandchar += ('%i+' % (min(nodes[0], nodes[3]) + 1))
+	elif nodes[1] is not None and nodes[4] is not None and (nodes[0] is None and nodes[2] is None and nodes[3] is None and nodes[5] is None):
+		bandchar = 'E' if nodes[1] < nodes[4] else 'L'
+		bandchar += ('%i-' % (min(nodes[1], nodes[4]) + 1))
+	elif nodes[2] is not None and (nodes[0] is None and nodes[1] is None and nodes[3] is None and nodes[4] is None and nodes[5] is None):
+		bandchar = 'H%i+' % (nodes[2] + 1)
+	elif nodes[5] is not None and (nodes[0] is None and nodes[1] is None and nodes[2] is None and nodes[3] is None and nodes[4] is None):
+		bandchar = 'H%i-' % (nodes[5] + 1)
+	else:
+		bandchar = '??'
+		warnings['general'] = True
+		if not suppress_warning:
+			sys.stderr.write("Warning (band_type): Unable to determine band character and/or number of nodes. Are we at k = 0?\n")
+	if "verbose" in sys.argv:
+		print("Nodes:", " ".join(["--" if n is None else "%2i" % n for n in nodes]), "  ", bandchar)
+	return bandchar, warnings
+
+
+### BULK ORBITAL TYPE
+def set_orbitaltype(data, set_it = True):
+	"""Get bulk orbital character labels.
+
+	Arguments:
+	data      DiagData instance. Result from (bulk) diagonalization.
+	set_it    True or False. Whether to set the labels in the DiagData instance
+	          data. If False, only return, but do not set the labels.
+
+	Returns:
+	List of strings. The orbital character labels in the same order as the
+	states at the zero point in data.
+	"""
+	data_k0 = data.get_zero_point()
+	if data_k0 is None:
+		sys.stderr.write("Warning (get_orbitaltype): Can get orbital character only at 0.\n")
+		return None
+	if data_k0.obsids is None:
+		sys.stderr.write("Warning (get_orbitaltype): Observable data not present.\n")
+		return None
+
+	nstates = len(data_k0.eival)
+	zeros = np.zeros(nstates, dtype = float)
+	gamma6 = np.real(data_k0.get_observable('gamma6')) if 'gamma6' in data_k0.obsids else zeros
+	gamma8l = np.real(data_k0.get_observable('gamma8l')) if 'gamma8l' in data_k0.obsids else zeros
+	gamma8h = np.real(data_k0.get_observable('gamma8h')) if 'gamma8h' in data_k0.obsids else zeros
+	gamma7 = np.real(data_k0.get_observable('gamma7')) if 'gamma7' in data_k0.obsids else zeros
+	jz = np.real(data_k0.get_observable('jz')) if 'jz' in data_k0.obsids else zeros
+
+	bandtypes = []
+	for j in range(0, nstates):
+		if gamma6[j] >= 0.99 and gamma8l[j] < 0.01 and gamma8h[j] < 0.01 and gamma7[j] < 0.01:
+			bandtypes.append('G6')
+		elif gamma6[j] < 0.01 and gamma8l[j] >= 0.99 and gamma8h[j] < 0.01 and gamma7[j] < 0.01:
+			bandtypes.append('G8L')
+		elif gamma6[j] < 0.01 and gamma8l[j] < 0.01 and gamma8h[j] >= 0.99 and gamma7[j] < 0.01:
+			bandtypes.append('G8H')
+		elif gamma6[j] < 0.01 and gamma8l[j] < 0.01 and gamma8h[j] < 0.01 and gamma7[j] >= 0.99:
+			bandtypes.append('G7')
+		else:
+			bandtypes.append('??')
+			continue
+		if jz[j] >= 0.25:
+			bandtypes[-1] += '+'
+		elif jz[j] <= -0.25:
+			bandtypes[-1] += '-'
+	if set_it:
+		data_k0.set_char(bandtypes)
+	return bandtypes
+
+
+### DECOMPOSITION ###
+def decompose_eivec(param, eivec, mode = None, conserve_phase = False, verbose = False):
+	"""Decompose an eigenvector into a sum of products using singular value decomposition (SVD).
+	Given a state |psi> in the Hilbert space H, the result is a decomposition of
+	the form
+	  |psi> = sum_j s_j |phi_j> |chi_j>
+	where |phi_j> and |chi_j> live in Hilbert spaces H_A and H_B, respectively,
+	with H_A \\otimes H_B = H. The coefficients s_j satisfy 0 <= s_j <= 1 and are
+	called the 'singular values'. They are usually ordered in decreasing order,
+	which allows for truncation of the sum.
+
+	References:
+	[1] Wikipedia, "Singular value decomposition",
+	    https://en.wikipedia.org/wiki/Singular_value_decomposition
+	[2] SciPy, documentation for scipy.linalg.svd,
+	    https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svd.html
+
+	Arguments:
+	param           PhysParams instance. Used for the values nz, ny, and
+	                norbitals.
+	eivec           Numpy array. The eigenvector, result of a diagonalization.
+	mode            None, 'orbital', 'yz', or 'full'. If None or 'orbital',
+	                decompose into orbital and geometric part. If 'yz',
+	                decompose into y and z+orbitals (|phi(y)> and |chi_o(z)>).
+	                If 'full', use a SVD of degree 3 (factorization of the
+	                Hilbert space into 3 factors) to decompose into y, z, and
+	                orbital parts.
+	conserve_phase  True or False. If True, try to fix the complex phases of the
+	verbose         True or False. If True, print extra diagnostic information.
+
+	Returns:
+	s       Array of singular values s_j. We truncate at |s_j| = 0.1.
+	u, v    Numpy arrays of len(s) vectors, |phi_j> and |chi_j>, respectively.
+
+	Note:
+	If mode == 'full', return s, u1, u2, u3. The three latter numpy arrays
+	contain the vectors in the three respective Hilbert spaces. The SVD for
+	degree > 2 does not behave as 'nicely' as degree = 2, so this mode should be
+	used with care.
+	"""
+	if mode is None:
+		mode = "orbital"
+	norb = param.norbitals
+	ny = param.ny
+	nz = param.nz
+	if eivec.shape == (nz * norb,):
+		ny = 1
+	if eivec.shape != (ny * nz * norb,):
+		raise ValueError
+
+	eivec1 = eivec
+	e_angles = [np.angle(x) for x in eivec if abs(x) > 1e-6]
+	if not conserve_phase and len(e_angles) > 0:
+		e_ang = np.mod(e_angles, np.pi / 2)
+		if (e_ang.max() - e_ang.min()) < 1e-3:
+			e_angle = (e_ang.min() + e_ang.max()) / 2
+			if verbose:
+				print("arg( ) = %6.1f" % (e_angle * 180. / np.pi))
+			eivec1 = eivec * np.exp(-1.j * e_angle)
+	if mode == "orbital":
+		eivec_mat = eivec1.reshape(ny * nz, norb)
+		u, s, vh = svd(eivec_mat)
+		if verbose:
+			for j, s1 in enumerate(s):
+				if abs(s1)**2 < 0.01:
+					break
+				print("%.3f: abs2 = " % abs(s1)**2, ", ".join(["%6.3f" % x for x in np.abs(vh[j, :])**2]))
+				print("       Re   = ", ", ".join(["%6.3f" % x for x in np.real(vh[j, :])]))
+				print("       Im   = ", ", ".join(["%6.3f" % x for x in np.imag(vh[j, :])]))
+				print("      arg   = ", ", ".join(["%6.1f" % (np.angle(x) * 180 / np.pi) if np.abs(x) > 1e-6 else "  ----" for x in vh[j, :]]))
+				print(", ".join(["%6.1f" % (np.angle(x) * 180 / np.pi) if np.abs(x) > 1e-6 else "  ----" for x in u[32:40, j]]))
+				u_angles = [np.angle(x) for x in u[:, j] if abs(x) > 1e-6]
+				v_angles = [np.angle(x) for x in vh[j, :] if abs(x) > 1e-6]
+				if len(u_angles) > 0 and len(v_angles) > 0:
+					u_deg = np.mod(np.array(u_angles) * 180. / np.pi, 180)
+					v_deg = np.mod(np.array(v_angles) * 180. / np.pi, 180)
+					if (u_deg.max() - u_deg.min()) < 1e-3:
+						print("arg(u) = %6.1f" % ((u_deg.min() + u_deg.max()) / 2))
+					if (v_deg.max() - v_deg.min()) < 1e-3:
+						print("arg(v) = %6.1f" % ((v_deg.min() + v_deg.max()) / 2))
+					if (u_deg.max() - u_deg.min()) < 1e-3 and (v_deg.max() - v_deg.min()) < 1e-3:
+						arg_total = (u_deg.min() + u_deg.max() + v_deg.min() + v_deg.max()) / 2
+						print("arg    = %6.1f  %6.1f" % (arg_total, np.mod(arg_total, 90)))
+			print()
+		return s, u, vh.conjugate().transpose()
+	elif mode == "yz":
+		eivec_mat = eivec1.reshape(ny, nz * norb)
+		u, s, vh = svd(eivec_mat)
+		if verbose:
+			for j, s1 in enumerate(s):
+				if abs(s1)**2 < 0.01:
+					break
+				v_angles = [np.angle(x) for x in vh[j, :] if abs(x) > 1e-6]
+				v_ang = np.mod(v_angles, np.pi / 2)
+				v_phase = 1.0
+				if len(v_ang) > 0 and (v_ang.max() - v_ang.min()) < 1e-3:
+					v_angle = (v_ang.min() + v_ang.max()) / 2
+					v_phase = np.exp(-1.j * v_angle)
+				print("%.3f: %s" % (abs(s1)**2, band_type(param, v_phase * vh[j, :], suppress_warning = False)))
+		return s, u, vh.conjugate().transpose()
+	elif mode == "full":
+		eivec_mat = eivec1.reshape(ny, nz * norb)
+		u1, s1, _ = svd(eivec_mat)
+		eivec_mat = np.transpose(eivec1.reshape(ny, nz, norb), (1, 2, 0)).reshape(nz, ny * norb)
+		u2, s2, _ = svd(eivec_mat)
+		eivec_mat = np.transpose(eivec1.reshape(ny, nz, norb), (2, 0, 1)).reshape(norb, ny * nz)
+		u3, s3, _ = svd(eivec_mat)
+		if verbose:
+			print("s1:", s1[0:8])
+			print("s2:", s2[0:8])
+			print("s3:", s3[0:8])
+		eivec_mat = eivec1.reshape(ny, nz, norb)
+		u1h = u1.conjugate().transpose()
+		u2h = u2.conjugate().transpose()
+		u3h = u3.conjugate().transpose()
+		# s = np.einsum('il,jm,kn,lmn', u1h, u2h, u3h, eivec_mat)  # very slow
+		s = np.transpose(np.dot(u2h, eivec_mat), (0, 2, 1))
+		s = np.transpose(np.dot(u3h, s), (0, 2, 1))
+		s = np.transpose(np.dot(u1h, s), (0, 2, 1))
+		s[np.abs(s) >= 1e-10] = 0.0
+		if verbose:
+			print("u3:")
+			for j in range(0, norb):
+				print("abs2 = ", ", ".join(["%6.3f" % x for x in np.abs(u3[:, j])**2]))
+			print()
+			ind = np.indices((ny, nz, norb))
+			indices = ind[:, np.abs(s) >= 1e-3].transpose()
+			values = s[np.abs(s) >= 1e-3]
+			order = np.argsort(-np.abs(values))
+			print(np.count_nonzero(s[np.abs(s) >= 1e-3]), '/', ny * nz * norb)
+			for i, v in zip(indices[order], values[order]):
+				print("%-15s: %.3f %s" % (i, np.abs(v)**2, v))
+				if np.abs(v) < 0.01:
+					break
+			# print(indices)
+		return s, u1, u2, u3
+	else:
+		raise ValueError("Invalid value for argument 'mode'")
+
+
+### DISPERSION DERIVATIVES ###
+def set_disp_derivatives(data, dedk=False, v=False):
+	"""Calculate all relevant derivatives of the dispersion.
+
+	Arguments:
+	data    DiagData instance
+	dedk    True or False. If True, add observables dedk# (where # is a
+	        component) to data. These are the bare derivative values dE/dk in
+	        units of meV nm.
+	v       True or False. If True, add observables v# (where # is a component)
+	        to data. These are the derivatives expressed as a velocity in units
+	        of 10^6 m s^-1.
+
+	Note:
+	Arguments dedk and v may not both be False.
+
+	No return value
+	"""
+	if not dedk and not v:
+		raise ValueError("At least one of the arguments dedk and v must be True")
+	if data.grid is None:
+		sys.stderr.write("Warning (set_disp_derivatives): Cannot calculate gradients if there is no grid.\n")
+		return
+	if data.get_all_bindex() is None:
+		sys.stderr.write("Warning (set_disp_derivatives): Band indices are required but not present.\n")
+		return
+
+	deriv_components = data.grid.get_derivative_components()
+	for component in deriv_components + ['abs']:
+		deriv_data = disp_derivative(data, component)
+		deriv_data = {} if deriv_data is None else deriv_data
+		for b in deriv_data:
+			if dedk:
+				data.set_observable_values('dedk' + component, deriv_data[b], (b,))
+			if v:
+				data.set_observable_values('v' + component, deriv_data[b] / hbar / 1e6, (b,))
+	return
+
+def disp_derivative(ei_data, component):
+	"""Calculate a derivative of the dispersion.
+
+	Arguments:
+	ei_data    DiagData instance. Result from diagonalization. The band indices
+	           need to be defined for this function to work.
+	component  String. A valid vector component. The component of the derivative
+	           to calculate.
+
+	Returns:
+	A dict instance, whose keys are the band indices and the values (arrays)
+	encode the derivative as function of momentum. An empty dict is returned
+	if the derivative cannot be calculated.
+	"""
+	bidx = ei_data.get_all_bindex()
+	if bidx is None:
+		sys.stderr.write("Warning (disp_derivative): Band indices are required but not present.\n")
+		return {}
+	deriv_data = {}
+	if len(ei_data) == 1:
+		sys.stderr.write("Warning (disp_derivative): Cannot calculate gradients at a single data point.\n")
+		return {}
+	if ei_data.grid is None:
+		sys.stderr.write("Warning (disp_derivative): Cannot calculate gradients if there is no grid.\n")
+		return {}
+
+	dim = len(ei_data.shape)
+	# Normalize the component string: strip 'dk' or 'k' prefixes, so that
+	# e.g. 'dkx', 'kx', and 'x' are all accepted.
+	if component.startswith('dk'):
+		component = component[2:]
+	if component.startswith('k'):
+		component = component[1:]
+	if component == '':
+		# Empty component: for a 1d cartesian grid, use the grid's own axis;
+		# otherwise fall back to the radial component 'r'.
+		component = ei_data.grid.vtype if dim == 1 and ei_data.grid.vtype in ['x', 'y', 'z'] else 'r'
+	if component not in ['r', 'abs', 'x', 'y', 'z', 'phi', 'theta']:
+		sys.stderr.write(f"Warning (disp_derivative): Invalid value {component} for argument component.\n")
+		return {}
+
+	# Get values and variable names of data grid
+	karray, kvar, _, _ = ei_data.grid.get_var_const(return_tuples=True, use_prefix=False)
+	# Angular coordinates may be stored in degrees; convert to radians so
+	# that np.gradient yields derivatives with respect to radians.
+	if ei_data.grid.degrees:
+		karray = tuple(val * np.pi / 180 if var in ['phi', 'theta'] else val for val, var in zip(karray, kvar))
+	# Get indices of variables in the full VectorGrid, because the data grid
+	# might be a subset, as constant values are not considered.
+	gridvar = ei_data.grid.get_components()
+	co_idx = [gridvar.index(co) for co in kvar]
+	# Determine indexing argument for get_plot_coord()
+	indexing_arg = "index" if dim == 1 else f"index{dim}d"
+
+	if component == 'abs':
+		# Magnitude of the gradient: combine the partial derivatives with the
+		# metric coefficients supplied by the grid.
+		grad_coeff = ei_data.grid.gradient_length_coeff()
+		for b in bidx:
+			_, eival = ei_data.get_plot_coord(b, indexing_arg)
+			grad = np.atleast_2d(np.gradient(eival, *karray))
+			abs_grad = np.sqrt(sum([grad_i**2 * grad_coeff[ci] for grad_i, ci in zip(grad, co_idx)]))
+			deriv_data[b] = abs_grad.flatten()
+	else:
+		# Single component: contract the gradient with the (unit) Jacobian of
+		# the requested component with respect to the grid variables.
+		jacobian = ei_data.grid.jacobian(component, unit=True)
+		for b in bidx:
+			_, eival = ei_data.get_plot_coord(b, indexing_arg)
+			grad = np.atleast_2d(np.gradient(eival, *karray))
+			deriv = sum([grad_i * jacobian[ci] for grad_i, ci in zip(grad, co_idx)])
+			deriv_data[b] = deriv.flatten()
+
+	# Reached with an empty dict only if bidx itself is empty
+	if deriv_data == {}:
+		sys.stderr.write(f"Warning (disp_derivative): No derivatives were calculated because the dispersion data and the derivative component/type {component} are incompatible.\n")
+
+	return deriv_data
diff --git a/kdotpy-v1.0.0/src/kdotpy/batchtools.py b/kdotpy-v1.0.0/src/kdotpy/batchtools.py
new file mode 100644
index 0000000000000000000000000000000000000000..806b6407325f4026258399c9cef5236775ac431d
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/batchtools.py
@@ -0,0 +1,429 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os.path
+
+from multiprocessing import cpu_count as mp_cpu_count
+from platform import system
+from subprocess import PIPE, Popen
+from .cmdargs.tools import isint, isfloat
+from . import cmdargs
+from .config import get_config
+
+
def parse_batch_args(sysargv):
	"""Parse arguments for kdotpy-batch

	This function extracts the @-variables and the command to run, plus a few
	auxiliary variables (ncpu, nprocess).

	Arguments:
	sysargv  List of strings. The command line arguments, analogous to sys.argv.

	Returns:
	allvar   List of strings. The names of the @-variables
	allval   List. The values of the @-variables.
	cmd      List of strings. The command line to execute.
	opts     A dict instance. Contains options: ncpu and nprocess.
	"""
	allvar = []
	cmd_at = None
	ncpu = None
	nprocess = None

	# Get arguments specific for 'kdotpy-batch.py'
	for arg in sysargv[1:]:
		if arg.startswith("@"):
			var = arg[1:]
			if "@" in var:
				sys.stderr.write("ERROR (parse_batch_args): No second '@' allowed in variable name\n")
				exit(1)
			elif "{" in var or "}" in var:
				sys.stderr.write("ERROR (parse_batch_args): Variable name cannot contain '{' or '}'.\n")
				exit(1)
			elif var in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
				# Single digits are reserved for the positional replacements
				# @0, @1, @2, ... in replace_and_do_command().
				sys.stderr.write("ERROR (parse_batch_args): Variable name cannot be a single digit.\n")
				exit(1)
			elif var != "" and var in allvar:
				sys.stderr.write("ERROR (parse_batch_args): Variable names must be unique.\n")
				exit(1)
			else:   # zero-length variable identifier is explicitly included
				allvar.append(var)
		elif arg == 'cpu' or arg == 'cpus':
			if nprocess is not None:
				sys.stderr.write("ERROR (parse_batch_args): Specification of number of cpus and number of processes cannot be combined.\n")
				exit(1)
			try:
				ncpu = int(sysargv[sysargv.index(arg) + 1])
			except (ValueError, IndexError):
				# Fixed: the message previously lacked the '% arg' operand, so
				# the literal '%s' was printed instead of the argument name.
				sys.stderr.write("ERROR (parse_batch_args): Argument '%s' must be followed by a number.\n" % arg)
				exit(1)
		elif arg == 'parallel' or arg == 'proc':
			if nprocess is not None:
				sys.stderr.write("ERROR (parse_batch_args): Specification of number of cpus and number of processes cannot be combined.\n")
				exit(1)
			try:
				nprocess = int(sysargv[sysargv.index(arg) + 1])
			except (ValueError, IndexError):
				# Fixed: missing '% arg' operand (same as above).
				sys.stderr.write("ERROR (parse_batch_args): Argument '%s' must be followed by a number.\n" % arg)
				exit(1)
		elif arg == 'do' or arg == '--' or arg == 'cmd':
			cmd_at = sysargv.index(arg)
			break

	if cmd_at is None or cmd_at >= len(sysargv) - 1:
		sys.stderr.write("ERROR (parse_batch_args): No command specified. The command to be run must follow 'do', 'cmd' or '--'.\n")
		exit(1)
	if len(allvar) == 0:
		# NOTE(review): this error deliberately does not exit(1); the function
		# proceeds and returns empty variable lists. Confirm this is intended.
		sys.stderr.write("ERROR (parse_batch_args): No variable ranges specified.\n")

	# Parse arguments. Handle the @ arguments; range and list specifications.
	# Fixed: this section previously read from sys.argv instead of the sysargv
	# parameter, silently ignoring the caller's argument list.
	allval = []
	for v in allvar:
		vrange = cmdargs.grid(args = '@' + v, from_argv = sysargv[:cmd_at])
		if vrange == [] or vrange is None:
			# Not a numeric range; expect an explicit bracketed list [a, b, ...]
			argn = sysargv.index("@" + v)
			if cmd_at < argn + 1 or not sysargv[argn + 1].startswith("["):
				sys.stderr.write("ERROR (parse_batch_args): Variable specification must be followed by range or list.\n")
				exit(1)
			str_to_parse = " ".join(sysargv[argn + 1: cmd_at])
			str_to_parse = str_to_parse.split('@')[0]
			# Cut the string at the first balanced closing bracket
			str_to_parse1 = ""
			j = 0
			for s in str_to_parse:
				if s == '[':
					j += 1
				elif s == ']':
					j -= 1
				str_to_parse1 += s
				if j == 0:
					break

			if str_to_parse1.count('[') != str_to_parse1.count(']'):
				sys.stderr.write("ERROR (parse_batch_args): Unbalanced brackets [ and ].\n")
				exit(1)
			list_of_str = str_to_parse1[1:-1].split(",")
			vrange = [int(x) if isint(x) else float(x) if isfloat(x) else x.strip() for x in list_of_str]
		elif all([x == int(x) for x in vrange]):
			# Collapse float ranges whose values are all integral to ints
			vrange = [int(x) for x in vrange]

		allval.append(vrange)

	# Extract command-line template for the program to execute
	cmd = sysargv[cmd_at + 1:]
	# Define options dict
	opts = {'ncpu': ncpu, 'nprocess': nprocess}
	return allvar, allval, cmd, opts
+
def ncpu_nprocess(cmd, ncpu = None, nprocess = None, **opts):
	"""Determine how many parallel jobs to run and how many CPUs they use.

	Arguments:
	cmd       List of strings. Command line arguments of the child command;
	          scanned for a 'cpu'/'cpus'/'ncpu' argument.
	ncpu      None or int. The number of cpus extracted by parse_batch_args().
	nprocess  None or int. The number of processes extracted by
	          parse_batch_args().
	**opts    Unused arguments.

	Returns:
	ncpu      Integer or None. The number of cpus.
	nprocess  Integer. The number of processes.
	"""
	# Number of CPUs on this machine; None if it cannot be determined.
	try:
		maxcpu = mp_cpu_count()
	except:
		sys.stderr.write("Warning (kdotpy-batch.py): Could not determine number of CPUs.\n")
		maxcpu = None

	# CPUs per child process: the first parseable 'cpu'/'cpus'/'ncpu' value
	# found in cmd; default is all available CPUs (1 if unknown).
	cmd_ncpu = 1 if maxcpu is None else maxcpu
	for pos, token in enumerate(cmd[:-1]):
		if token not in ("cpu", "cpus", "ncpu"):
			continue
		try:
			cmd_ncpu = int(cmd[pos + 1])
		except:
			continue  # next token was not a number; keep scanning
		break

	# Derive the process count from the CPU budget if it was not given.
	if nprocess is None:
		if ncpu is not None:
			nprocess = ncpu // cmd_ncpu
		elif maxcpu is None:
			nprocess = 1
		else:
			nprocess = maxcpu // cmd_ncpu
	if nprocess < 1:
		nprocess = 1
		sys.stderr.write("Warning (kdotpy-batch.py): Minimum number of processes is one (sequential run).\n")
	if nprocess > 1 and cmd_ncpu > 1:
		# Total CPU demand; warn when it exceeds what the machine offers.
		ncpu = nprocess * cmd_ncpu
		if maxcpu is not None and ncpu > maxcpu:
			sys.stderr.write("Warning (kdotpy-batch.py): Number of requested parallel processes is larger than the available number of CPUs. This is not recommended, because of a significant performance penalty.\n")
	return ncpu, nprocess
+
def nice_command(niceness, command):
	"""Prepend a 'nice' invocation to a command line.

	Arguments:
	niceness  Integer >= 0 or None. The target 'nice' value; values outside
	          [0, 19] are clamped with a warning. None or 0 adds no prefix.
	command   List of strings. The command line arguments.

	Returns:
	niced_cmd  List of strings. The command, prefixed by the appropriate
	           'nice' invocation where applicable.
	"""
	if system() == 'Windows':
		# Windows has no 'nice' command; return the command unmodified.
		return command
	prefix = []
	if niceness is None:
		pass
	elif not isinstance(niceness, int):
		raise TypeError("Niceness must be an integer")
	elif niceness != 0:
		# Clamp into the valid range [0, 19], warning on out-of-range input.
		if niceness < 0:
			sys.stderr.write("Warning (nice_command): Minimum niceness is 0\n")
		elif niceness > 19:
			sys.stderr.write("Warning (nice_command): Maximum niceness is 19\n")
		clamped = min(max(niceness, 0), 19)
		prefix = ["nice", "-n", "%i" % clamped]
	if not isinstance(command, list):
		raise TypeError("Argument command must be a list")
	return prefix + command
+
+
def run_and_wait(cmdline_args, niceness = 0, out = None, err = None):
	"""Runs a command without monitoring

	The only way to interrupt the execution is by Ctrl-C (or by sending a signal
	to the external program from somewhere else, e.g., another shell or htop).

	NOTE: It is typically a bad idea to terminate any of the worker processes.
	It should be safe to terminate/abort/interrupt the kdotpy-batch.py parent
	process, but this is currently not the case. (TODO)

	TODO: The exit statuses are not returned correctly in a multithreaded run.
	This can probably be solved only with a dedicated parallelization function
	for kdotpy-batch.py (which is probably a good idea anyway). Try:
	  python3 kdotpy-batch.py cpu 4 @x 0 10 / 10 do sleep -1
	versus
	  python3 kdotpy-batch.py cpu 1 @x 0 10 / 10 do sleep -1
	(sleep -1 is an illegal command that returns exit code 1)

	Arguments:
	cmdline_args  List of strings. The command line arguments.
	niceness      Integer >= 0. The target 'nice' value of the command.
	out           File, PIPE or None. Refers to stdout stream.
	err           File, PIPE or None. Refers to stderr stream.

	Returns:
	exitstatus  Integer. The exit status of the command. This is 0 when
	            successful, nonzero if an error has occurred. None if the
	            command could not be launched or ended abnormally.
	p_stdout    Contents of stdout output from the command
	p_stderr    Contents of stderr output from the command
	"""

	# Build the 'nice' prefix separately; a failure here (e.g. invalid
	# niceness) degrades to running the command without 'nice'.
	try:
		nicecmd = nice_command(niceness, command = [])
	except:
		nicecmd = []

	# Capture the streams via pipes unless the caller supplied file objects.
	if out is None:
		out = PIPE
	if err is None:
		err = PIPE

	try:
		p = Popen(nicecmd + cmdline_args, stdout=out, stderr=err)
	except OSError as e:
		sys.stderr.write("ERROR (run_and_wait): OSError %i %s\n" % (e.errno, e.strerror))
		return None, None, None
	except:
		sys.stderr.write("ERROR (run_and_wait): Generic error\n")
		return None, None, None

	# Block until the child exits; communicate() also drains the pipes, so a
	# child producing much output cannot deadlock on a full pipe buffer.
	try:
		p_stdout, p_stderr = p.communicate()
	except KeyboardInterrupt:
		sys.stderr.write("Warning (run_and_wait): Keyboard interrupt\n")
		p.terminate()
		exitstatus = p.poll()  # may still be None if the child has not exited yet
		return exitstatus, None, None
	except:
		sys.stderr.write("Warning (run_and_wait): Abnormal termination. Unhandled exception.\n")
		return None, None, None
	else:
		exitstatus = p.poll()

	if exitstatus != 0:
		sys.stderr.write("Warning (run_and_wait): Termination with exit status %i\n" % exitstatus)

	return exitstatus, p_stdout, p_stderr
+
+
def multi_values(*lists):
	"""Give all combinations of the elements of lists as tuples

	For example: multi_values(['a', 'b'], [1, 2, 3]) yields
	  [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('b', 3)]

	Arguments:
	lists   One or more lists (tuples or other iterables also allowed).

	Returns:
	allval   List of tuples. Tuples containing all combinations of elements
	         from the input lists, with the last list varying fastest.
	strides  Tuple of integers. Its i-th element is the number of positions in
	         'allval' to advance one element of the i-th list. In the above
	         example, this would be (3, 1).
	"""
	if not lists:
		return [], ()
	# Build the Cartesian product by extending prefixes one list at a time.
	combos = [()]
	for values in lists:
		combos = [prefix + (item,) for prefix in combos for item in values]
	# The stride of list i is the product of the lengths of all later lists.
	strides = []
	step = 1
	for values in reversed(lists):
		strides.append(step)
		step *= len(values)
	strides.reverse()
	return combos, tuple(strides)
+
def replace_float(val, fmt = '%s', smart_decimal = True):
	"""Format a value; optionally trim trailing zeros after the decimal point.

	Arguments:
	val            The value to format (typically a float).
	fmt            String. printf-style format applied to val.
	smart_decimal  True or False. If True and the result contains a '.',
	               strip trailing zeros but keep one digit after the point.

	Returns:
	String. The formatted value.
	"""
	text = fmt % val
	if not (smart_decimal and '.' in text):
		return text
	trimmed = text.rstrip('0')
	# Keep a single zero after a bare trailing decimal point, e.g. '1.' -> '1.0'
	return trimmed + '0' if trimmed[-1] == '.' else trimmed
+
def replace_and_do_command(idx, val, nval, cmd, allvar, strides):
	"""Do '@' replacements and run the command.

	In the list of command arguments, replace indicators with '@' by the
	appropriate input values. Then, execute the resulting command.
	This function is typically iterated over the 'allval' output of
	multi_values().

	The following replacements are done:
	  @@            Total number of values (= nval)
	  @0            Index (= idx)
	  @1, @2, ...   Index of the i-th variable
	  @varname      Value of variable with name 'varname' (specified in 'allvar')
	NOTE: The index outputs @0, @1, @2, ... are 1-based (1, ..., m). The
	arguments to this function use 0-based (0, ..., m-1) indexing, however.

	Arguments:
	idx      Integer. Index (counter) of the run; position in the 'allval' list
	val      n-tuple. Input value (n-tuple)
	nval     Integer. Total number of values (length of 'allval')
	cmd      List of strings. The command line arguments.
	allvar   List of strings. The variable names.
	strides  List or tuple of integers. Step size of i-th variable; output of
		     multi_values().

	Returns:
	exitstatus  Integer. Exit code of the executed command

	Raises:
	KeyboardInterrupt  If the child could not be launched or was interrupted
	                   (run_and_wait() returned None).
	"""
	# Numeric multi-index: decompose the flat counter idx into one counter per
	# variable using the stride sizes from multi_values().
	ji = []
	ji1 = idx
	for s in strides:
		ji.append(ji1 // s)
		ji1 -= ji[-1] * s

	# Define formatting function for values; a trailing '.' in the configured
	# format enables smart trimming of trailing zeros (see replace_float()).
	float_format_cfg = get_config('batch_float_format')
	if float_format_cfg.endswith('.'):
		float_format = float_format_cfg.rstrip('.')
		smart_decimal = True
	else:
		float_format = float_format_cfg
		smart_decimal = False
	try:
		float_format % -1.0
	except:
		sys.stderr.write("Warning (replace_and_do_command): Invalid format for float (configuration option 'batch_float_format').\n")
		raise

	# Define replacements as dict
	def fmt(val):
		return replace_float(val, fmt = float_format, smart_decimal = smart_decimal) if isinstance(val, float) else str(val)
	replacements = {'@@': "%i" % nval, '@0': "%i" % (idx + 1)}
	for i, v in enumerate(val):
		replacements["@%i" % (i+1)] = str(ji[i] + 1)
		replacements["@{%s}" % allvar[i]] = fmt(v)
		replacements["@" + allvar[i]] = fmt(v)

	# Perform replacements, in dict insertion order (@@ and @0 first, then per
	# variable @i, @{name}, @name).
	# NOTE(review): a variable name that is a prefix of another (e.g. @x and
	# @xy) can clobber the longer one if it comes first in allvar; confirm
	# that upstream validation prevents this.
	def replace_with_dict(s, d):
		for from_, to in d.items():
			s = s.replace(from_, to)
		return s
	cmd1 = [replace_with_dict(c, replacements) if '@' in c else c for c in cmd]

	# Determine output id; take from the command list (last occurrence wins).
	# Default is the index (counter value)
	outputid = ".%i" % (idx + 1)
	for j, c in enumerate(cmd1[:-1]):
		if c in ["out", "outputid", "outputname", "outid", "outfile"]:
			outputid = cmd1[j+1]

	if 'dryrun' in sys.argv:
		# Dry run: show the substituted command without executing it.
		print("%i: " % (idx + 1) + ", ".join(["@%s = %s" % (var, fmt(v)) for var, v in zip(allvar, val)]))
		print(" ".join(cmd1))
		exitstatus = 0
	else:
		curdir, outdir = cmdargs.outdir(do_chdir = False, replacements = replacements)
		stdout_path = os.path.join(outdir, "stdout%s.%s" % (outputid, get_config('batch_stdout_extension')))
		stderr_path = os.path.join(outdir, "stderr%s.%s" % (outputid, get_config('batch_stderr_extension')))
		# Context managers guarantee the log files are closed even when
		# run_and_wait() raises (e.g. on KeyboardInterrupt); the previous
		# explicit open()/close() pair leaked the descriptors in that case.
		with open(stdout_path, "w") as fout, open(stderr_path, "w") as ferr:
			exitstatus, stdout, stderr = run_and_wait(cmd1, niceness = 5, out = fout, err = ferr)
	if exitstatus is None:
		raise KeyboardInterrupt

	return exitstatus
+
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/berry.py b/kdotpy-v1.0.0/src/kdotpy/berry.py
new file mode 100644
index 0000000000000000000000000000000000000000..655ee0396a2a20a6dc78e01bc908c2deae8ef7ed
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/berry.py
@@ -0,0 +1,446 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from .physconst import eoverhbar
+from .momentum import Vector
+from .lltools import delta_n_ll
+from .hamiltonian import hz_sparse_ll_full
+
def dham(ham, k, dk, b, params, dim = 2, **modelopts):
	"""Differentiate the Hamiltonian at k numerically.

	Each component uses the symmetric difference quotient
	f'(k) = (f(k + dk) - f(k - dk)) / 2 dk.

	Arguments:
	ham          Callable. The Hamiltonian function.
	k            Vector instance. Momentum value at which to evaluate.
	dk           Float. Differentiation step size is 2 dk, see formula above.
	b            Magnetic field argument, passed through to ham.
	params       PhysParams instance. Needed to evaluate the Hamiltonian.
	dim          2 or 3. Dimension of the vector that is passed to the
	             Hamiltonian function and the number of derivative components
	             being returned.
	**modelopts  Keyword arguments that are passed to the Hamiltonian function.

	Returns:
	Tuple of dim matrices: the derivatives of the Hamiltonian with respect to
	kx, ky (and kz if dim is 3), in that order.
	"""
	if dim == 2:
		kval = list(k.xy())
	elif dim == 3:
		kval = list(k.xyz())
	else:
		raise ValueError("Argument 'dim' must be either 2 or 3.")
	derivatives = []
	for axis in range(dim):
		# Evaluate the Hamiltonian at k shifted by +/- dk along this axis
		kplus = list(kval)
		kminus = list(kval)
		kplus[axis] += dk
		kminus[axis] -= dk
		hplus = ham(kplus, b, params, **modelopts)
		hminus = ham(kminus, b, params, **modelopts)
		derivatives.append((hplus - hminus) / 2.0 / dk)
	return tuple(derivatives)
+
+
def berrycurv_k(datak, ham, params, which = (-4, 4), dk = 0.001, dim = 2, sort = False, **modelopts):
	"""Calculate the local Berry curvature at k for a selection of states.

	Use Berry_i = sum_j Im[ <i|dx H|j> × <j|dy H|i> / (E_i-E_j)^2 ]
	(× = cross product; simplified representation of actual definition)

	Arguments:
	datak        DiagDataPoint instance. Usual diagonalization output for a
	             single momentum value.
	ham          Callable. The Hamiltonian function.
	params       PhysParams instance. Needed to evaluate the Hamiltonian.
	which        2-tuple of floats or integers. If integers, select an interval
	             of band indices. If floats, select an energy interval.
	dk           Float. Differentiation step size is 2 dk.
	dim          2 or 3. Number of momentum dimensions.
	sort         True or False. Whether to sort the result by eigenvalue.
	**modelopts  Keyword arguments that are passed to the Hamiltonian function.

	Returns:
	bcurv    For 2 dimensions, an array of floats. The values of the Berry
	         curvature for the selected states. For 3 dimensions, a tuple of
	         three such arrays (bcurv_x, bcurv_y, bcurv_z), i.e., the components
	         of the Berry curvature vectors. (If sort is False, plain Python
	         lists are returned instead of arrays.)
	eival1   Eigenvalues of the selected states
	bidx1    Band indices of the selected states
	"""
	# Handle band selection: floats in 'which' select an energy window,
	# integers a band-index window; which = None keeps all states.
	if isinstance(which, tuple) and len(which) == 2:
		if isinstance(which[0], (float, np.floating)) or isinstance(which[1], (float, np.floating)):
			datak0 = datak.select_eival(which)
		elif isinstance(which[0], (int, np.integer)) or isinstance(which[1], (int, np.integer)):
			datak0 = datak.select_bindex(which)
		else:
			raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	elif which is None:
		datak0 = datak
	else:
		raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")

	# Extract data (selection); the sum over states j below still runs over
	# all states in datak, only the states i are restricted to the selection.
	neig1 = datak0.neig
	eival1 = datak0.eival
	eivec1T = datak0.eivec
	b_idx1 = datak0.bindex
	# Extract data (all)
	neig2 = datak.neig  # (unused below)
	eival2 = datak.eival
	eivec2T = datak.eivec
	eivec2T_H = eivec2T.conjugate().transpose()

	if eivec1T is None:
		sys.stderr.write("ERROR (Berrycurv_k): Missing eigenvectors.\n")
		exit(1)

	# We need to get rid of 'split'. This can be done silently; the splitting
	# Hamiltonian should cancel out in the derivative of the Hamiltonian.
	# TODO: This may not be the case for some of the 'more advanced' splitting
	# types.
	if 'split' in modelopts:
		del modelopts['split']

	# Differentiate the Hamiltonian numerically; datak.paramval is passed as
	# the magnetic-field argument 'b' of dham().
	if dim == 2:
		dxham, dyham = dham(ham, datak.k, dk, datak.paramval, params, dim = 2, **modelopts)
	elif dim == 3:
		dxham, dyham, dzham = dham(ham, datak.k, dk, datak.paramval, params, dim = 3, **modelopts)
	else:
		raise ValueError("Argument 'dim' must be either 2 or 3.")

	# Apply Eq. (2.15) of Bernevig's book
	# denom is set to zero where E_i == E_j (the 'where' argument avoids
	# division by zero), so degenerate pairs do not contribute.
	e1, e2 = np.meshgrid(eival1, eival2)
	denom = np.reciprocal(e1 - e2, out = np.zeros_like(e1), where = (e1 != e2))  # 1 / (E_i-E_j)
	# vx and vy each carry one factor 1 / (E_i-E_j); their products below thus
	# contain the 1 / (E_i-E_j)^2 of the formula in the docstring.
	vx = np.multiply(eivec2T_H @ (dxham @ eivec1T), denom)  # <j|dx H|i> / (E_i-E_j)
	vy = np.multiply(eivec2T_H @ (dyham @ eivec1T), denom)
	vxd = vx.conjugate().transpose()
	vyd = vy.conjugate().transpose()
	if dim == 2:
		bcurv = [-np.imag(np.dot(vxd[q, :], vy[:, q]) - np.dot(vyd[q, :], vx[:, q])) for q in range(0, neig1)]  # apply cross product
		if sort:
			order = np.argsort(eival1)
			return np.array(bcurv)[order], eival1[order], None if b_idx1 is None else b_idx1[order]
		else:
			return bcurv, eival1, b_idx1
	else:
		vz = np.multiply(eivec2T_H @ (dzham @ eivec1T), denom)
		vzd = vz.conjugate().transpose()
		# apply cross product component-wise
		bcurv_x = [-np.imag(np.dot(vyd[q, :], vz[:, q]) - np.dot(vzd[q, :], vy[:, q])) for q in range(0, neig1)]
		bcurv_y = [-np.imag(np.dot(vzd[q, :], vx[:, q]) - np.dot(vxd[q, :], vz[:, q])) for q in range(0, neig1)]
		bcurv_z = [-np.imag(np.dot(vxd[q, :], vy[:, q]) - np.dot(vyd[q, :], vx[:, q])) for q in range(0, neig1)]
		if sort:
			order = np.argsort(eival1)
			return (np.array(bcurv_x)[order], np.array(bcurv_y)[order], np.array(bcurv_z)[order]), eival1[order], None if b_idx1 is None else b_idx1[order]
		else:
			return (bcurv_x, bcurv_y, bcurv_z), eival1, b_idx1
+
+
+### SYMBOLIC ###
+
def berrycurv_ll(eidata, magn, h_sym, ll_max, which = (4, 4), norb = 8, sort = True):
	"""Calculate the Berry curvature for a selection of states for LL Hamiltonians.
	This calculation uses the symbolic version of the LL Hamiltonian.

	Use Berry_i = sum_j Im[ <i|dx H|j> × <j|dy H|i> / (E_i-E_j)^2 ]
	(× = cross product; simplified representation of actual definition)

	Arguments:
	eidata        DiagDataPoint instance. Usual diagonalization output for a
	              single magnetic field value.
	magn          Float. Value of the magnetic field.
	h_sym         SymbolicHamiltonian instance. The Hamiltonian.
	ll_max        NOT USED. Placeholder for uniform call signature (argument
	              list) compared to berrycurv_ll_full(). The value is always
	              replaced by the value extracted from eidata.
	which         2-tuple of floats or integers. If integers, select an interval
	              of band indices. If floats, select an energy interval.
	norb          6 or 8. Number of orbitals.
	sort          True or False. Whether to sort the result by eigenvalue.

	Returns:
	bcurv    An array of floats. The values of the Berry curvature for the
	         selected states.
	eival1   Eigenvalues of the selected states
	llidx1   LL indices of the selected states
	"""
	debug = False  # set to True for debug output

	# NOTE(review): the default which = (4, 4) differs from berrycurv_k's
	# (-4, 4); confirm whether this asymmetric band window is intended.
	# Handle band selection: floats select an energy window, integers a
	# band-index window; which = None keeps all states.
	if isinstance(which, tuple) and len(which) == 2:
		if isinstance(which[0], (float, np.floating)) or isinstance(which[1], (float, np.floating)):
			eidata1 = eidata.select_eival(which)
		elif isinstance(which[0], (int, np.integer)) or isinstance(which[1], (int, np.integer)):
			eidata1 = eidata.select_bindex(which)
		else:
			raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	elif which is None:
		eidata1 = eidata
	else:
		raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	eidata2 = eidata  # 'Rename' for consistency

	if eidata1.eivec is None:
		sys.stderr.write("ERROR (Berrycurv_ll): Missing eigenvectors.\n")
		exit(1)
	if eidata1.llindex is None:
		sys.stderr.write("ERROR (Berrycurv_ll): Missing LL indices.\n")
		exit(1)

	# Differentiate the Hamiltonian (symbolically, i.e., exact derivatives)
	dxham = h_sym.deriv("x")
	dyham = h_sym.deriv("y")

	# Initialize
	magnz = magn.z() if isinstance(magn, Vector) else magn
	# The ll_max argument is deliberately overridden here (see docstring).
	ll_min, ll_max = min(eidata2.llindex), max(eidata2.llindex)
	delta_n_vec = delta_n_ll(norb, magnz)

	all_bcurv = []
	all_eival = []
	all_b_idx = []  # TODO (currently collected nowhere below)
	all_ll_idx = []
	# Treat each LL index n separately; the sum over states j only includes
	# LLs within |dn| <= 4 of n — presumably couplings beyond that vanish for
	# this Hamiltonian structure (TODO confirm); near-zero couplings are
	# skipped explicitly below.
	for n in range(ll_min, ll_max + 1):
		bands1 = (eidata1.llindex == n)
		neig1 = np.count_nonzero(bands1)
		bcurv = np.zeros(neig1, dtype = float)
		eival1 = eidata1.eival[bands1]
		b_idx1 = None if eidata1.bindex is None else eidata1.bindex[bands1]  # TODO
		for dn in [-4, -3, -2, -1, 0, 1, 2, 3, 4]:
			if ll_min <= n + dn <= ll_max:
				dxmat = dxham.ll_evaluate((n + dn, n), magn, delta_n_vec, all_dof = True)
				dymat = dyham.ll_evaluate((n + dn, n), magn, delta_n_vec, all_dof = True)
				# Skip LL pairs whose coupling is numerically negligible
				if np.abs(dxmat).max() < 1e-10 and np.abs(dymat).max() < 1e-10:
					continue

				bands2 = (eidata2.llindex == n + dn)
				eival2 = eidata2.eival[bands2]
				e1, e2 = np.meshgrid(eival1, eival2)
				# Zero where E_i == E_j; degenerate pairs do not contribute
				denom = np.reciprocal(e1 - e2, out = np.zeros_like(e1), where = (e1 != e2))

				eivec1T = eidata1.eivec[:, bands1]
				eivec2T = eidata2.eivec[:, bands2]
				eivec2T_H = eivec2T.conjugate().transpose()

				# vx and vy each carry one factor 1 / (E_i-E_j); the products
				# below thus contain the 1 / (E_i-E_j)^2 of the formula.
				vx = np.multiply(eivec2T_H @ (dxmat @ eivec1T), denom)
				vy = np.multiply(eivec2T_H @ (dymat @ eivec1T), denom)
				vxd = vx.conjugate().transpose()
				vyd = vy.conjugate().transpose()

				bcurv += np.array([-np.imag(np.dot(vxd[j, :], vy[:, j]) - np.dot(vyd[j, :], vx[:, j])) for j in range(0, neig1)])

		order = np.argsort(eival1)  # order by energy eigenvalue
		all_bcurv.append(bcurv[order])
		all_eival.append(eival1[order])
		all_ll_idx.append(np.full((neig1,), n))

		if debug and n <= 3:
			lBinv2 = eoverhbar * magnz
			print("Berry curvature (LL %i, B = %s, lB^2 = %.2f nm^2):" % (n, magnz, 1 / lBinv2))
			print(" Index      E (meV)   Berry F (nm^2)   Chern C")
			for b, e in zip(bcurv[order][::-1], eival1[order][::-1]):
				print("(%3i, ---) %8.3f :  %13.3f  %8.3f" % (n, e, b, b * lBinv2))
			print()

	all_eival = np.concatenate(all_eival)
	if sort:
		order = np.argsort(all_eival)
		return np.concatenate(all_bcurv)[order], all_eival[order], np.concatenate(all_ll_idx)[order]
	else:
		return np.concatenate(all_bcurv), all_eival, np.concatenate(all_ll_idx)
+
def chernnumber_ll(eidata, magn, *args, **kwds):
	"""Calculate the Chern numbers for a selection of states for LL Hamiltonians.
	This calculation uses the symbolic version of the LL Hamiltonian.

	The result is Chern_i = Berry_i * lB^-2 where lB is the inverse magnetic
	length. This product can be viewed as implicit integration over momentum
	space. The result is dimensionless, i.e., the unit is 1.

	Arguments:
	eidata         DiagDataPoint instance. Usual diagonalization output for a
	               single magnetic field value.
	magn           Float. Value of the magnetic field.
	*args, **kwds  Further arguments passed to berrycurv_ll

	Returns:
	chern    An array of floats. The values of the Chern numbers for the
	         selected states.
	eival1   Eigenvalues of the selected states
	llidx1   LL indices of the selected states
	"""
	bcurv, eival, llidx = berrycurv_ll(eidata, magn, *args, **kwds)
	# Out-of-plane field component; magn may be a Vector or a plain number.
	bz = magn.z() if isinstance(magn, Vector) else magn
	# lB^-2 = (e/hbar) B converts Berry curvature (nm^2) to a pure number.
	inv_lb2 = eoverhbar * bz
	return bcurv * inv_lb2, eival, llidx
+
+### SYMBOLIC FULL ###
+
def berrycurv_ll_full(eidata, magn, h_sym, ll_max, which = (4, 4), norb = 8):
	"""Calculate the Berry curvature for a selection of states for LL Hamiltonians in full mode.
	This calculation uses the symbolic version of the full LL Hamiltonian.

	Use Berry_i = sum_j Im[ <i|dx H|j> × <j|dy H|i> / (E_i-E_j)^2 ]
	(× = cross product; simplified representation of actual definition)

	Arguments:
	eidata        DiagDataPoint instance. Usual diagonalization output for a
	              single magnetic field value.
	magn          Float. Value of the magnetic field.
	h_sym         SymbolicHamiltonian instance. The Hamiltonian.
	ll_max        Integer. Maximum LL index to consider.
	which         2-tuple of floats or integers. If integers, select an interval
	              of band indices. If floats, select an energy interval.
	norb          6 or 8. Number of orbitals.

	Returns:
	bcurv    An array of floats. The values of the Berry curvature for the
	         selected states.
	eival1   Eigenvalues of the selected states
	llidx1   LL indices of the selected states (for full mode, always None)
	"""
	debug = False  # set to True for debug output

	# NOTE(review): the default which = (4, 4) differs from berrycurv_k's
	# (-4, 4); confirm whether this asymmetric band window is intended.
	# Handle band selection: floats select an energy window, integers a
	# band-index window; which = None keeps all states.
	if isinstance(which, tuple) and len(which) == 2:
		if isinstance(which[0], (float, np.floating)) or isinstance(which[1], (float, np.floating)):
			eidata1 = eidata.select_eival(which)
		elif isinstance(which[0], (int, np.integer)) or isinstance(which[1], (int, np.integer)):
			eidata1 = eidata.select_bindex(which)
		else:
			raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	elif which is None:
		eidata1 = eidata
	else:
		raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	eidata2 = eidata  # 'Rename' for consistency

	if eidata1.eivec is None:
		sys.stderr.write("ERROR (Berrycurv_ll): Missing eigenvectors.\n")
		exit(1)

	# Initialize
	magnz = magn.z() if isinstance(magn, Vector) else magn
	delta_n_vec = delta_n_ll(norb, magnz)

	# Differentiate the Hamiltonian symbolically, then build the full sparse
	# LL matrices of the two derivative components.
	dxham = h_sym.deriv("x")
	dyham = h_sym.deriv("y")
	dxmat = hz_sparse_ll_full(dxham, ll_max, magn, norb = norb)
	dymat = hz_sparse_ll_full(dyham, ll_max, magn, norb = norb)

	neig1 = len(eidata1.eival)

	e1, e2 = np.meshgrid(eidata1.eival, eidata2.eival)
	# Zero where E_i == E_j, so degenerate pairs do not contribute
	denom = np.reciprocal(e1 - e2, out = np.zeros_like(e1), where = (e1 != e2))
	# Scale down eigenvectors if necessary: the eigenvector dimension assumes
	# norb orbitals in each of the ll_max + 3 LL blocks (n = -2, ..., ll_max),
	# whereas the derivative matrix keeps only components with
	# delta_n_vec + n >= 0 — presumably excluding negative oscillator indices
	# in the basis used by hz_sparse_ll_full (TODO confirm).
	matdim = dxmat.shape[0]
	vecdim = eidata1.eivec.shape[0]
	if vecdim > matdim:
		if vecdim % (norb * (ll_max + 3)) != 0:
			raise ValueError('Incompatible dimension')
		mask = []
		nz = vecdim // (norb * (ll_max + 3))  # integer division
		for n in range(-2, ll_max + 1):
			mask.append(np.tile(delta_n_vec + n >= 0, nz))
		mask = np.concatenate(mask)
		if np.count_nonzero(mask) != matdim:
			raise ValueError('Eigenvector downscaling: Got dimension %i, expected %i' % (np.count_nonzero(mask), matdim))
		eivec1T = eidata1.eivec[mask, :]
		eivec2T = eidata2.eivec[mask, :]
	else:
		eivec1T = eidata1.eivec
		eivec2T = eidata2.eivec
	eivec2T_H = eivec2T.conjugate().transpose()

	# vx and vy each carry one factor 1 / (E_i-E_j); the products below thus
	# contain the 1 / (E_i-E_j)^2 of the formula in the docstring.
	vx = np.multiply(eivec2T_H @ (dxmat @ eivec1T), denom)
	vy = np.multiply(eivec2T_H @ (dymat @ eivec1T), denom)
	vxd = vx.conjugate().transpose()
	vyd = vy.conjugate().transpose()

	bcurv = np.array([-np.imag(np.dot(vxd[j, :], vy[:, j]) - np.dot(vyd[j, :], vx[:, j])) for j in range(0, neig1)])

	if debug:
		order = np.argsort(eidata1.eival)[::-1]  # order by energy eigenvalue, decreasing
		lBinv2 = eoverhbar * magnz
		print("Berry curvature (LL full, B = %s, lB^2 = %.2f nm^2):" % (magn, 1 / lBinv2))
		print(" Index      E (meV)   Berry F (nm^2)   Chern C")
		for b, e in zip(bcurv[order], eidata1.eival[order]):
			print("(---, ---) %8.3f :  %13.3f  %8.3f" % (e, b, b * lBinv2))
		print()

	order = np.argsort(eidata1.eival)  # order by energy eigenvalue
	return bcurv[order], eidata1.eival[order], None
+
def chernnumber_ll_full(eidata, magn, *args, **kwds):
	"""Calculate the Chern numbers for a selection of states for LL Hamiltonians.
	This calculation uses the symbolic version of the full LL Hamiltonian.

	The result is Chern_i = Berry_i * lB^-2 where lB is the inverse magnetic
	length. This product can be viewed as implicit integration over momentum
	space. The result is dimensionless, i.e., the unit is 1.

	Arguments:
	eidata         DiagDataPoint instance. Usual diagonalization output for a
	               single magnetic field value.
	magn           Float. Value of the magnetic field.
	*args, **kwds  Further arguments passed to berrycurv_ll_full

	Returns:
	chern    An array of floats. The values of the Chern numbers for the
	         selected states.
	eival1   Eigenvalues of the selected states
	llidx1   LL indices of the selected states (for full mode, always None)
	"""
	bcurv, eival, llidx = berrycurv_ll_full(eidata, magn, *args, **kwds)
	# Out-of-plane field component; magn may be a Vector or a plain number.
	bz = magn.z() if isinstance(magn, Vector) else magn
	# lB^-2 = (e/hbar) B converts Berry curvature (nm^2) to a pure number.
	inv_lb2 = eoverhbar * bz
	return bcurv * inv_lb2, eival, llidx
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/bhz.py b/kdotpy-v1.0.0/src/kdotpy/bhz.py
new file mode 100644
index 0000000000000000000000000000000000000000..83918550ee8dd74a832b7c3cd6374b4100a582e8
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bhz.py
@@ -0,0 +1,734 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+from scipy.sparse import dok_matrix
+import sys
+
+from .physconst import eoverhbar
+from .symbolic import SymbolicMatrix, SymbolicHamiltonian
+from .momentum import Vector
+from .diagonalization import diagonalization as diag
+from .hamiltonian import hz_sparse_split
+from .config import get_config_bool
+
+### HELPER FUNCTION ###
+def bandtype_str(b):
+	"""Format a band label (str) or band index (int) as a string.
+
+	Positive integer indices are prefixed with an explicit '+' sign; None is
+	rendered as '--'. Any other type raises a TypeError.
+	"""
+	if b is None:
+		return '--'
+	if isinstance(b, str):
+		return b
+	if isinstance(b, (int, np.integer)):
+		return ("+%i" % b) if b > 0 else ("%i" % b)
+	raise TypeError("Argument b must be str, int, or None.")
+
+
+### PERTURBATION THEORY ###
+def pert_bhz(data, params, energy = 0.0, neig = 50, spin_obs = None, bands_a = None, bands_lower = None, bands_upper = None, e_gap = 0.0, k0 = 0.0, b0 = None, verbose = False, pot = None, **modelopts):
+	"""Do the perturbation theory (Löwdin partitioning)
+	Reduce the large Hamiltonian to a small one, typically 4x4, treating the
+	other nearby bands perturbatively.
+
+	Recipe:
+	First, organize the bands.
+	Then, take the 'reference' Hamiltonian h0, which is the diagonal matrix of
+	subband energies of the A bands at k0 = (0, 0).
+	The perturbation h' is h(k) - h0, where k runs over the momenta in data.
+
+	Arguments:
+	data       DiagData instance.
+	params     PhysParams instance. Is passed to Hamiltonian.
+	energy, neig
+		       Parameters affecting the calculation. Are passed to the
+		       diagonalization function and Hamiltonian.
+	spin_obs   Accepted for interface compatibility; not used in this function.
+	bands_a    List of integers. Bands considered as A bands, considered exactly
+	           at zero momentum.
+	bands_lower, bands_upper
+		       Lists of integers. Bands below and above the A bands that are
+		       considered perturbatively.
+	e_gap      Accepted for interface compatibility; not used in this function.
+	k0         Vector instance, float, or int. Momentum value at which to take
+	           h0; an int is interpreted as a positional index into data. This
+	           point must be present in argument data. An out-of-range index
+	           terminates the program via sys.exit(1).
+	b0         Float. Magnetic field at which to expand the Hamiltonian. Note
+	           that this option does not take into account orbital fields for
+	           the out-of-plane component.
+	verbose    True or False. If True, print some diagnostic information.
+	pot        Potential; passed on to the diagonalization function.
+	**modelopts
+		       Further keyword arguments passed to the diagonalization function
+		       and Hamiltonian. 'lattice_reg' is forced to False.
+
+	Returns:
+	[nl, na, nu]  List of 3 integers. Number of bands below A bands ('lower'),
+	              number of A bands, number of bands above A bands ('upper').
+	h0per         Sparse (CSC) matrix. Zero order perturbative result.
+	h1per         SymbolicMatrix instance. First order perturbative result.
+	h2per         SymbolicMatrix instance. Second order perturbative result.
+	"""
+	if 'lattice_reg' not in modelopts or modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (do_BHZ): Lattice regularization is disabled for BHZ calculation\n")
+	modelopts['lattice_reg'] = False
+
+	nl = len(bands_lower)
+	na = len(bands_a)
+	nu = len(bands_upper)
+
+	## Set "anchor" value (momentum)
+	nk = len(data)
+	if isinstance(k0, (float, np.floating)):
+		k0 = Vector(k0, 0.0, astype = 'xy')
+	elif isinstance(k0, (int, np.integer)):
+		# Valid positional indices are 0 .. nk - 1. (The previous test
+		# 'k0 > nk' admitted k0 == nk, which would raise IndexError below.)
+		if k0 < 0 or k0 >= nk:
+			sys.stderr.write("ERROR (pert_bhz): Invalid k index.\n")
+			sys.exit(1)
+		k0 = data.get_momenta()[k0]
+
+	if k0 is not None and k0 != 0:
+		sys.stderr.write("Warning (pert_bhz): BHZ calculation at k != 0 is not recommended.\n")
+	if b0 is None:
+		b0 = 0.0
+	elif (isinstance(b0, Vector) and abs(b0.z()) > 1e-6) or (isinstance(b0, (float, np.floating)) and abs(b0) > 1e-6):
+		sys.stderr.write("Warning (pert_bhz): BHZ calculation for out-of-plane magnetic fields (B_z != 0) is not recommended. It evaluates the Zeeman and exchange coupling at B, but orbital fields are neglected for the out-of-plane component.\n")
+
+	## Determine symbolic hamiltonians; diagonalize at k0
+	k0x, k0y = k0.xy()
+	h_sym = SymbolicHamiltonian(hz_sparse_split, (params,), modelopts, b0 = b0, exclude_zero = False, kx = k0x, ky = k0y)
+	hprime_sym = SymbolicHamiltonian(hz_sparse_split, (params,), modelopts, b0 = b0, exclude_zero = True, kx = k0x, ky = k0y)
+	diagdata_k0 = diag.hsym_k0(h_sym, params, k0 = 0, orbital_magn = b0, return_eivec = True, energy = energy, neig = neig, pot = pot, **modelopts)
+
+	## Normalize wave function phase; use the same phase choice as for wave
+	## function plots.
+	diagdata_k0.set_eivec_phase(inplace = True)
+
+	## Determine sets of eigenvalues and eigenvectors for A and B
+	## (B = L + U)
+	idx_a = [diagdata_k0.get_index(b[0]) for b in bands_a]
+	idx_b = [diagdata_k0.get_index(b[0]) for b in bands_lower] + [diagdata_k0.get_index(b[0]) for b in bands_upper]
+
+	e_a = [diagdata_k0.eival[j] for j in idx_a]
+	e_b = [diagdata_k0.eival[j] for j in idx_b]
+
+	avec = [diagdata_k0.eivec[:, j] for j in idx_a]
+	bvec = [diagdata_k0.eivec[:, j] for j in idx_b]
+
+	if verbose:
+		print("Wave function phase standardization:")
+		for av, idx in zip(avec, idx_a):
+			# Check whether largest component is purely real,
+			# i.e., verify result of diagdata_k0.set_eivec_phase().
+			jmax = np.argmax(np.abs(av))
+			psimax = av[jmax]
+			phase = psimax / abs(psimax)
+			char = diagdata_k0.char[idx] if diagdata_k0.char[idx] is not None else diagdata_k0.bindex[idx] if diagdata_k0.bindex is not None else "--"
+			print("Eigenvector %4s: Index %i, orbital %i, position (z) %i / %i, phase %7.2f deg, Re = %9.2e, Im = %9.2e" % (char, jmax, jmax % params.norbitals, jmax // params.norbitals, params.nz, np.angle(phase, deg = True), np.real(psimax), np.imag(psimax)))
+
+	## H(0), (diagonal) zero-th order matrix
+	hper0 = dok_matrix((na, na), dtype=complex)
+	for j in range(0, na):
+		hper0[j, j] = e_a[j]
+	if verbose:
+		print(hper0)
+
+	hper0 = hper0.tocsc()
+
+	if verbose:
+		# Cross-check: H(0) evaluated explicitly in the A-band basis should
+		# reproduce the diagonal of subband energies assembled above.
+		hper0a = np.zeros((na, na), dtype=complex)
+		eB = eoverhbar * params.magn.z() if isinstance(params.magn, Vector) else eoverhbar * params.magn
+		h0 = h_sym.evaluate((0.0, 0.0), eB)
+		for j in range(0, na):
+			for i in range(0, na):
+				hper0a[i, j] = avec[i].conjugate() @ h0 @ avec[j]
+		print("H(0)=")
+		print(np.real_if_close(hper0a))
+		print()
+
+	## Do the perturbation theory symbolically
+	hper1 = hprime_sym.hper1(avec)
+	hper2 = hprime_sym.hper2(e_a, e_b, avec, bvec, verbose = False)
+	# Change to 'verbose = True' for more diagnostic output
+
+	return [nl, na, nu], hper0, hper1, hper2
+
+def select_bhz_bands(ddp, bands_a, bands_upper, bands_lower, loc_obs = None, par_obs = None, loc_threshold = 0.95, intermediate_bands = False):
+	"""Determine the A, lower B, and upper B bands and print a band table.
+
+	Arguments:
+	ddp            DiagDataPoint instance.
+	bands_a        None, tuple, or list. If None, take two bands below and two
+	               above the gap. If a 1- or 2-tuple of integers, interpret as
+	               amounts of bands (total, or below/above the gap). If a list
+	               of integers and/or strings, interpret as band indices and
+	               band character labels.
+	bands_lower, bands_upper
+		           Integers or None. Numbers of bands below and above the A
+		           bands that are considered perturbatively. If None, determine
+		           these bands automatically from the localization observable.
+	loc_obs        Integer or string. Index or id of the localization
+	               observable. The values of this observable should express the
+	               probability density inside the 'confinement' (e.g., the
+	               quantum well).
+	par_obs        Integer, string or None. Index or id of the parity
+	               observable. If None, do not consider parity values.
+	loc_threshold  Float. The minimal localization (value of localization
+	               observable) for a band to be considered localized.
+	intermediate_bands
+	               True or False. If False (default), do not allow a
+	               non-contiguous selection of bands. If True, allow B bands in
+	               between A bands; this only takes effect if the input are band
+	               labels. (NOTE: Setting True is experimental, it may cause
+	               errors and exceptions further on.)
+
+	Returns:
+	bands_lower  List of 2-tuples (energy, band label) for the perturbative
+	             bands below the A bands ('lower').
+	bands_a      List of 2-tuples (energy, band label) for the A bands.
+	bands_upper  List of 2-tuples (energy, band label) for the perturbative
+	             bands above the A bands ('upper').
+
+	Note:
+	On failure, return None, None, None.
+	"""
+	## Initialization
+	localization_warning = 0
+	band_error = False
+
+	## Test which type the A band input is (labels or amounts)
+	na_below = None
+	na_above = None
+	a_bindex = []
+	a_labels = []
+	if bands_a is None:
+		na_below, na_above = 2, 2
+		intermediate_bands = False  # This can be done silently, as the input defines a contiguous set of A bands by definition.
+	elif isinstance(bands_a, tuple) and all(isinstance(b, int) for b in bands_a):
+		if len(bands_a) == 1:
+			na_below = 2 * (bands_a[0] // 4)  # integer (floor) division
+			na_above = bands_a[0] - na_below
+		elif len(bands_a) == 2:
+			na_below, na_above = bands_a
+		else:
+			raise TypeError("If argument bands_a is a tuple, it must be length 1 or 2.")
+		intermediate_bands = False  # This can be done silently, as the input defines a contiguous set of A bands by definition.
+	elif isinstance(bands_a, list) and all(isinstance(b, (int, str)) for b in bands_a):
+		for b in bands_a:
+			if isinstance(b, str):
+				# A label without spin sign selects both spin partners
+				if b.endswith('+') or b.endswith('-'):
+					a_labels.append(b)
+				else:
+					a_labels.append(b + '+')
+					a_labels.append(b + '-')
+			if isinstance(b, int):
+				a_bindex.append(b)
+		a_labels = list(set(a_labels))
+	else:
+		raise TypeError("Argument bands_a must be None, 1- or 2-tuple, or list.")
+
+	## Extract information from DiagDataPoint instance
+	try:
+		ddp.sort_by_eival(inplace = True, reverse = False)
+		eival = ddp.eival
+		bandtypes = ddp.char
+		bindex = ddp.bindex
+	except Exception:
+		raise ValueError("Band data at k=0 is not available")
+	if bandtypes is None and len(a_labels) > 0:
+		sys.stderr.write("ERROR (select_bhz_bands): Band characters are given as arguments but not available at k=0.\n")
+		return None, None, None
+	if bindex is None:
+		sys.stderr.write("ERROR (select_bhz_bands): Band indices are not available at k=0.\n")
+		return None, None, None
+
+	## Determine A bands
+	band_cat = []
+	if na_below is not None and na_above is not None:
+		band_cat = ["A" if (0 < b <= na_above) or (0 > b >= -na_below) else "" for b in bindex]
+	else:
+		band_cat = ["A" if b in a_bindex else "" for b in bindex]
+		if len(a_labels) > 0:
+			for j, b in enumerate(bandtypes):
+				if b in a_labels:
+					if band_cat[j] == "A":
+						# Supply the band index and label to the format string
+						# (previously it was printed with bare %i/%s placeholders)
+						sys.stderr.write("Warning (Select_BHZ_Bands): Band %i/%s is selected doubly by both index and label.\n" % (bindex[j], b))
+					band_cat[j] = "A"
+
+	a_indices = [j for j, bc in enumerate(band_cat) if bc == 'A']
+	if len(a_indices) < 2:
+		sys.stderr.write("ERROR (Select_BHZ_Bands): Number of A bands must be at least 2.\n")
+		band_error = True
+	if len(a_indices) == 0:
+		# Without this guard, min()/max() below would raise ValueError
+		return None, None, None
+
+	min_a = min(a_indices)
+	max_a = max(a_indices) + 1
+
+	if (not intermediate_bands) and max_a - min_a != len(a_indices):
+		sys.stderr.write("ERROR (Select_BHZ_Bands): There may be no other bands between the A bands.\n")
+		band_error = True
+
+	a_bindex1 = [bindex[j] for j in a_indices]
+	if min(a_bindex1) > 0 or max(a_bindex1) < 0:
+		sys.stderr.write("Warning (Select_BHZ_Bands): The gap does not lie in the A set.\n")
+
+	show_intermediate_band_warning = False
+	for j, e in enumerate(eival):
+		if min_a <= j < max_a:
+			if band_cat[j] != "A":
+				bt_bi = bandtypes[j] if bandtypes is not None else bindex[j]
+				sys.stderr.write("ERROR (Select_BHZ_Bands): Band %s (#%i at %8.3f meV) should be A but is %s\n" % (bt_bi, j, e, "X" if band_cat[j] == "" else band_cat[j]))
+				if intermediate_bands:
+					band_cat[j] = "U"  # mark them as 'upper bands'; U or L does not matter eventually
+					show_intermediate_band_warning = True
+				else:
+					band_cat[j] = "X"
+					band_error = True
+			else:
+				band_cat[j] = "A"
+		elif isinstance(bands_upper, (int, np.integer)) and max_a <= j < max_a + bands_upper:
+			band_cat[j] = "U"
+		elif isinstance(bands_lower, (int, np.integer)) and min_a > j >= min_a - bands_lower:
+			band_cat[j] = "L"
+		else:
+			band_cat[j] = "X"
+	if show_intermediate_band_warning:
+		sys.stderr.write("Warning (Select_BHZ_Bands): Non-contiguous selection of A bands allowed by argument intermediate_bands = True. This is an experimental feature. Further errors and/or exceptions may occur.\n")
+
+	# Determine L, U bands automatically, based on localization
+	loc_val = ddp.get_observable(loc_obs)
+	if loc_val is not None:
+		if bands_upper is None:
+			# Extend upwards from the A set while bands remain well localized
+			bands_upper = 0
+			for j in range(max_a, ddp.neig):
+				loc = np.real(loc_val[j])
+				if loc >= loc_threshold:
+					band_cat[j] = "U"
+					bands_upper += 1
+				else:
+					break
+		if bands_lower is None:
+			# Extend downwards from the A set while bands remain well localized
+			bands_lower = 0
+			for j in range(min_a - 1, -1, -1):
+				loc = np.real(loc_val[j])
+				if loc >= loc_threshold:
+					band_cat[j] = "L"
+					bands_lower += 1
+				else:
+					break
+	elif bands_upper is None or bands_lower is None:
+		sys.stderr.write("ERROR (Select_BHZ_Bands): Cannot determine L, U bands automatically\n")
+		return None, None, None
+
+	## Tabulate bands
+	## Use color output if on a color terminal
+	print()
+	print("Energy      b  Char %s Set Localiz." % ("" if par_obs is None else " P  "))
+	loc_val = ddp.get_observable(loc_obs)
+	par_val = ddp.get_observable(par_obs)
+	for j, e, bi, bcl in reversed(list(zip(list(range(0, ddp.neig)), eival, bindex, band_cat))):
+		if loc_val is not None:
+			loc = np.real(loc_val[j])
+			if loc < loc_threshold and bcl != '' and bcl in 'LAU':
+				localization_warning += 1
+		else:
+			loc = None
+			localization_warning = -1
+		if par_val is not None:
+			isopz = np.real(par_val[j])
+			isopzstr = " +  " if isopz > 0.9 else " -  " if isopz < -0.9 else " ?  "
+		else:
+			isopzstr = ""
+		if not COLOR_DISPLAY or bcl == '':
+			c1, c2 = "", ""
+		elif loc is None or loc >= loc_threshold:
+			c1 = "\x1b[1;33m" if bcl == 'U' else "\x1b[1;34m" if bcl == 'L' else "\x1b[1;32m" if bcl == 'A' else ""
+			c2 = "\x1b[0m"
+		elif loc < 0.95:
+			# NOTE: 0.95 is a fixed 'poorly localized' cutoff, intentionally
+			# independent of loc_threshold
+			c1 = "\x1b[31m" if bcl in 'UL' else "\x1b[1;31m" if bcl == 'A' else ""
+			c2 = "\x1b[0m"
+		else:
+			c1 = "\x1b[1;33m" if bcl == 'U' else "\x1b[1;34m" if bcl == 'L' else "\x1b[1;31m" if bcl == 'A' else ""
+			c2 = "\x1b[0m"
+		bt = '--' if bandtypes is None else bandtypes[j]
+		print("%s%8.3f  %3i  %-4s %s %-3s %s%s" % (c1, e, bi, bt, isopzstr, bcl if bcl in "LAU" else "", "" if loc is None else "%5.1f%%" % (loc * 100.0), c2))
+
+	if localization_warning == -1:
+		sys.stderr.write("Warning (do_bhz): The quantum-well localization of the bands could not be determined.\n")
+		sys.stderr.write("                  Distrust BHZ results if input contains poorly localized bands.\n")
+		print("Band localization could not be determined.")
+	elif localization_warning > 0:
+		sys.stderr.write("Warning (do_bhz): The input contains %i bands with poor quantum-well localization (< %i%%).\n" % (localization_warning, int(loc_threshold * 100)))
+		sys.stderr.write("                  Distrust BHZ results if input contains poorly localized bands.\n")
+		sys.stderr.write("                  Choose a smaller number of bands.\n")
+		print("There are bands with poor quantum-well localization.")
+
+	if bandtypes is None:
+		bandtypes = bindex  # Use band index to identify bands if characters are not available
+	bands_lower = [(e, bt) for e, bt, bcl in zip(eival, bandtypes, band_cat) if bcl == 'L']
+	bands_upper = [(e, bt) for e, bt, bcl in zip(eival, bandtypes, band_cat) if bcl == 'U']
+	bands_a_out = [(e, bt) for e, bt, bcl in zip(eival, bandtypes, band_cat) if bcl == 'A']
+	## Reordering of bands was removed, as it did not do anything useful
+	## I am adding this note in case in hindsight the removal needs to be reverted.
+
+	if band_error:
+		return None, None, None
+	else:
+		return bands_lower, bands_a_out, bands_upper
+
+def bhz_param(h_bhz, magn, verbose = False):
+	"""Extract the BHZ parameters A, B, C, D, M from a 4x4 fit Hamiltonian.
+
+	Argument:
+	h_bhz    SymbolicMatrix instance with h_bhz.dim = 4. The fit parameters
+	         of a 4x4 BHZ Hamiltonian.
+	magn     Vector instance or float. Magnetic field.
+	verbose  True or False. If True, print the parameter values.
+
+	Returns:
+	[a, b, c, d, m]  A list of five 2-tuples of floats. The values of BHZ
+	                 parameters A, B, C, D, and M for the two respective 'spin'
+	                 blocks. If the input matrix is not 4x4, print an error and
+	                 return None.
+	"""
+	if not isinstance(h_bhz, SymbolicMatrix) or h_bhz.dim != 4:
+		sys.stderr.write("ERROR (BHZ_param): Fit parameter matrix should be 4x4\n")
+		return
+
+	if isinstance(magn, Vector):
+		eB = eoverhbar * magn.z()
+	else:
+		eB = eoverhbar * magn
+	# Diagonal elements at k = 0 (with field) and their k^2 coefficients at B = 0
+	e_k0 = [h_bhz[i, i].evaluate((0.0, 0.0), eB) for i in range(4)]
+	e_k2 = [h_bhz[i, i].evaluate((1.0, 0.0), 0.0) for i in range(4)]
+
+	# A is the leading-order coefficient of the off-diagonal element per block
+	a = []
+	for i, j in ((0, 1), (2, 3)):
+		lead = h_bhz[i, j].leadingorder(1e-7).opsum
+		a.append(0.0 if lead == {} else list(lead.values())[0])
+	b = [(e_k2[1] - e_k2[0]) / 2, (e_k2[3] - e_k2[2]) / 2]
+	c = [(e_k0[0] + e_k0[1]) / 2, (e_k0[2] + e_k0[3]) / 2]
+	d = [-(e_k2[1] + e_k2[0]) / 2, -(e_k2[3] + e_k2[2]) / 2]
+	m = [(e_k0[0] - e_k0[1]) / 2, (e_k0[2] - e_k0[3]) / 2]
+	if verbose:
+		print()
+		print("BHZ parameters:")
+		print("A:", a[0], a[1])
+		print("B:", b[0], b[1])
+		print("D:", d[0], d[1])
+		print("C:", c[0], c[1])
+		print("M:", m[0], m[1])
+
+	return [a, b, c, d, m]
+
+
+# Use ANSI color codes in the band tables only when stdout is an interactive
+# terminal (avoids escape sequences ending up in redirected/piped output).
+COLOR_DISPLAY = sys.stdout.isatty()
+
+## Calculate BHZ Hamiltonian
+def do_bhz(data, params, energy = 0.0, neig = 50, spin_obs = None, loc_obs = None, par_obs = None, bands_a = None, bands_upper = None, bands_lower = None, k0 = 0.0, verbose = False, angles = 2, num_cpus = 1, localization_observable_index = None, **modelopts):
+	"""Calculate a BHZ-like Hamiltonian using 'Löwdin partitioning'.
+
+	Arguments:
+	data           DiagData instance.
+	params         PhysParams instance. Is passed to Hamiltonian.
+	spin_obs       Integer or string. Index or id of the 'spin' observable. It
+	               typically corresponds to Jz or Sz.
+	loc_obs        Integer or string. Index or id of the localization
+	               observable. The values of this observable should express the
+	               probability density inside the 'confinement' (e.g., the
+	               quantum well).
+	par_obs        Integer, string or None. Index or id of the parity
+	               observable. If None, do not consider parity values.
+	bands_a        List of integers. Bands considered as A bands, considered
+	               exactly at zero momentum.
+	bands_lower, bands_upper
+		           Lists of integers. Bands below and above the A bands that are
+		           considered perturbatively. If None, determine these bands
+		           automatically.
+	k0             Vector instance or float. Momentum value at which to take h0.
+	               This point must be present in argument data.
+	verbose        True or False. If True, print some diagnostic information.
+	angles         NOT USED
+	num_cpus       NOT USED
+	localization_observable_index
+	               NOT USED
+	energy, neig, **modelopts
+		           Parameters and keywords arguments affecting the calculation.
+		           Is passed to diagonalization function and Hamiltonian.
+
+	Returns:
+	bandtypes_result  List of strings. Band characters labelling the basis
+	                  states of the BHZ-like Hamiltonian.
+	bhz_param         List of five 2-tuples. The output of bhz_param(). If the
+	                  number of A bands is not 4, return an empty list [].
+	h_bhz             SymbolicHamiltonian instance. The BHZ-like Hamiltonian
+
+	On failure, return three empty lists: [], [], [].
+	"""
+	# TODO: Clean up arguments
+	intermediate_bands = get_config_bool('bhz_allow_intermediate_bands')
+
+	## Get data at k0 (= 0 by default)
+	modelopts['orbital_magn'] = False
+	if k0 is None or k0 == 0.0:
+		try:
+			diagdata_k0 = data.get_zero_point(ignore_paramval = True)
+		except:
+			# NOTE(review): bare 'except' — any failure here (not just a
+			# missing zero point) silently triggers rediagonalization at k = 0;
+			# consider narrowing the exception type.
+			hsym = SymbolicHamiltonian(hz_sparse_split, (params,), modelopts)
+			diagdata_k0 = diag.hsym_k0(hsym, params, energy = energy, neig = neig, **modelopts)
+	else:
+		if 'lattice_reg' in modelopts and modelopts['lattice_reg'] is True:
+			sys.stderr.write("ERROR (do_bhz): Perturbation theory at k0 != 0 cannot be done reliably with lattice regularization enabled. Please use the configuration value 'lattice_regularization=false'.\n")
+			return [], [], []
+		diagdata_k0 = data.find(k0)
+		if diagdata_k0 is None:
+			# Point k0 not present in data: diagonalize at k = 0 instead
+			hsym = SymbolicHamiltonian(hz_sparse_split, (params,), modelopts)
+			diagdata_k0 = diag.hsym_k0(hsym, params, energy = energy, neig = neig, **modelopts)
+	del modelopts['orbital_magn']
+	if diagdata_k0 is None:
+		sys.stderr.write("ERROR (do_bhz): Unable to find zero point.\n")
+		return [], [], []
+
+	b0 = None if diagdata_k0.paramval is None or diagdata_k0.paramval == 0 else diagdata_k0.paramval  # Magnetic field
+
+	## Select bands
+	sys.stderr.write("Analyzing bands... \n")
+	bands_lower, bands_a, bands_upper = select_bhz_bands(diagdata_k0, bands_a = bands_a, bands_upper = bands_upper, bands_lower = bands_lower, loc_obs = loc_obs, par_obs = par_obs, intermediate_bands = intermediate_bands)
+
+	if None in [bands_lower, bands_a, bands_upper]:
+		sys.stderr.write("ERROR (do_bhz): Perturbation theory failed\n")
+		return [], [], []
+
+	## Do perturbation
+	sys.stderr.write("Performing perturbation theory... \n")
+	modelopts_pert = {'energy': energy, 'neig': neig, 'spin_obs': spin_obs, 'k0': k0, 'verbose': verbose}
+	modelopts_pert.update(modelopts)
+	ns, hper0, hper1, hper2 = pert_bhz(data, params, bands_a = bands_a, bands_upper = bands_upper, bands_lower = bands_lower, b0 = b0, **modelopts_pert)
+	# NOTE(review): pert_bhz returns [nl, na, nu]; the unpacking below swaps
+	# nl and nu. Only na is used further on, so this is currently harmless,
+	# but fix the order before relying on nu or nl.
+	[nu, na, nl] = ns
+
+	if None in [hper0, hper1, hper2]:
+		sys.stderr.write("ERROR (do_bhz): Perturbation theory failed\n")
+		return [], [], []
+
+	## Tidy up results by removing very small values
+	sys.stderr.write("Tidying results... \n")
+	hper1c = [[x.chop(1e-7) for x in x1] for x1 in hper1]
+	hper2c = [[x.chop(1e-7) for x in x1] for x1 in hper2]
+	htotal = SymbolicMatrix(hper0) + SymbolicMatrix(hper1c).maxorder(2) + SymbolicMatrix(hper2c).maxorder(2)
+
+	if verbose:
+		print(ns)
+		print("H0 (%s):" % type(hper0))
+		print(hper0)
+		print()
+
+		print("H1 (%s):" % type(hper1))
+		for i in range(0, na):
+			for j in range(0, na):
+				print("H1(%i, %i) =" % (i, j), hper1c[i][j].maxorder(2).kp_km_str())
+
+		print(str(SymbolicMatrix(hper1c).maxorder(2)))
+		print()
+
+		print("H2 (%s):" % type(hper2))
+		for i in range(0, na):
+			for j in range(0, na):
+				print("H2(%i, %i) =" % (i, j), hper2c[i][j].maxorder(2).kp_km_str())
+
+		print(str(SymbolicMatrix(hper2c).maxorder(2)))
+		print("Hermiticity check (H2):")
+		hper2m = SymbolicMatrix(hper2c)
+		hper2m_imag = hper2m - hper2m.conjugate()
+		hper2m_imag_max = np.amax([np.amax(np.abs(hper2m_imag[o])) for o in hper2m_imag.opsum])
+		herm_result = "OK" if hper2m_imag_max < 1e-9 else "Failed"
+		print("%s (delta = %g)" % (herm_result, hper2m_imag_max))
+		if hper2m_imag_max >= 1e-9:
+			print(str(hper2m_imag))
+			print()
+		print("H_BHZ:")
+		print(str(htotal))
+		# Print a coarse magnitude map of the matrix elements
+		for i in range(0, htotal.dim):
+			for j in range(0, htotal.dim):
+				if htotal[i, j].iszero(1e-7):
+					print(" .. ", end=' ')
+				else:
+					coeffs = [np.abs(x) for x in htotal[i, j].opsum.values()]
+					print("%4i" % int(max(coeffs)), end=' ')
+			print()
+
+		hnonzero = []
+		print("Hnonzero:")
+		for i in range(0, htotal.dim):
+			hnonzero.append([j for j in range(0, htotal.dim) if not htotal[i, j].iszero(1e-7)])
+		print(hnonzero)
+		print()
+
+	## Find and attempt to remove complex phases
+	## REMOVED --> Now the phases are determined already in pert_bhz(), which should be more reliable
+	h_bhz = htotal.chop(1e-7)
+
+	## Reorder into groups (aka blocks), if possible
+	nonzero_groups = [[j for j in range(0, na) if not h_bhz[i, j].iszero(1e-7)] for i in range(0, na)]
+	if isinstance(par_obs, str):
+		par_obs = diagdata_k0.obsids.index(par_obs) if par_obs in diagdata_k0.obsids else None
+	if par_obs is not None and diagdata_k0 is not None and diagdata_k0.obsvals is not None and len(diagdata_k0.obsvals) > par_obs:
+		# Group the A bands into two blocks by the sign of the parity observable
+		reduced_groups = [[], []]
+		for j in range(0, na):
+			e0 = float(np.real(h_bhz[j, j].evaluate((0.0, 0.0), 0.0)))
+			# get_index() looks up the state by its k = 0 energy — assumes the
+			# diagonal element still matches an eigenvalue closely enough
+			eidx = diagdata_k0.get_index(e0)
+			par_val = np.real(diagdata_k0.obsvals[par_obs][eidx])
+			if par_val > 0:
+				reduced_groups[0].append(j)
+			else:
+				reduced_groups[1].append(j)
+			# print (j, e0, eidx, par_val)
+	else:
+		# No parity observable: merge overlapping sets of coupled (nonzero)
+		# matrix elements into connected groups
+		reduced_groups = [nonzero_groups[0]]
+		for nzg in nonzero_groups[1:]:
+			new_group = True
+			for rg in reduced_groups:
+				if not set(rg).isdisjoint(nzg):
+					new_group = False
+					for x in nzg:
+						if x not in rg:
+							rg.append(x)
+			if new_group:
+				reduced_groups.append(nzg)
+
+	if verbose:
+		print("Nonzero groups:", nonzero_groups)
+		print("Reduced groups:", reduced_groups)
+
+	# Test if we get a valid reordering, which are two blocks of total length na,
+	# and all indices must appear in their union.
+	eival_a = np.array([b[0] for b in bands_a])
+	btype_a = np.array([b[1] for b in bands_a])
+
+	if len(reduced_groups) != 2:  # alternative approach, using estimate of isoparity operator
+		# Estimate the isoparity sign from the band character label 'Xn±':
+		# orbital letter E or H contributes -1, L contributes +1; the subband
+		# number n contributes (-1)^n; a trailing '-' flips the sign.
+		isopz_estimate = [1 for b in btype_a]
+		ok = True
+		for j, b in enumerate(btype_a):
+			if not isinstance(b, str):
+				ok = False
+				break
+			if b[0] == 'E' or b[0] == 'H':
+				isopz_estimate[j] *= -1
+			elif b[0] == 'L':
+				pass
+			else:
+				ok = False
+				break
+			try:
+				bn = int(b[1:-1])
+				isopz_estimate[j] *= (-1)**bn
+			except:
+				ok = False
+				break
+			if b[-1] == '+':
+				pass
+			elif b[-1] == '-':
+				isopz_estimate[j] *= -1
+			else:
+				ok = False
+				break
+		# A valid two-block split requires equally many bands of either
+		# estimated parity (sum of ±1 must vanish)
+		if ok and sum(isopz_estimate) == 0:
+			reduced_groups = [[j for j, p in enumerate(isopz_estimate) if p == 1], [j for j, p in enumerate(isopz_estimate) if p == -1]]
+			if verbose:
+				print("After second attempt using isoparity estimate:")
+				print("Reduced groups:", reduced_groups)
+
+	if len(reduced_groups) == 2:
+		reordering = [x for g in reduced_groups for x in g]
+		if len(reordering) == na:
+			valid_reordering = True
+			for i in range(0, na):
+				if i not in reordering:
+					valid_reordering = False
+					break
+			if valid_reordering:
+				# Attempt reordering of the second block, in the equivalent order
+				# of the first block, i.e., with matching band character but
+				# opposite spin.
+				bandtypes1 = []
+				for j in reduced_groups[0]:
+					e0 = h_bhz[j, j].evaluate((0.0, 0.0), 0.0)
+					idx = np.argsort(np.abs(eival_a - e0))[0]
+					bandtypes1.append(btype_a[idx])
+				bandtypes2 = []
+				for j in reduced_groups[1]:
+					e0 = h_bhz[j, j].evaluate((0.0, 0.0), 0.0)
+					idx = np.argsort(np.abs(eival_a - e0))[0]
+					bandtypes2.append(btype_a[idx])
+				if verbose:
+					print("Block 1:", bandtypes1)
+					print("Block 2:", bandtypes2)
+				# For each band in block 1, find its spin partner (same label,
+				# opposite trailing sign) in block 2
+				partner_band = []
+				for bt in bandtypes1:
+					if not isinstance(bt, str):
+						partner_band.append(None)
+						continue
+					partner = bt[:-1] + ('-' if bt[-1] == '+' else '+' if bt[-1] == '-' else '?')
+					if '?' in partner:
+						partner_band.append(None)
+					elif partner in bandtypes2:
+						partner_band.append(reduced_groups[1][bandtypes2.index(partner)])
+					else:
+						partner_band.append(None)
+				if None not in partner_band:
+					reordering = reduced_groups[0] + partner_band
+
+				h_bhz = h_bhz.shuffle(reordering)
+
+	## Output basis order
+	sys.stderr.write("Perturbation theory done\n")
+	bandtypes_result = []
+	for j in range(0, na):
+		# Identify each basis state by the A band whose k = 0 energy is closest
+		e0 = h_bhz[j, j].evaluate((0.0, 0.0), 0.0)
+		idx = np.argsort(np.abs(eival_a - e0))[0]
+		bandtypes_result.append(bandtype_str(btype_a[idx]))
+	## Print some information
+	print("Basis order (k = 0):", ", ".join(bandtypes_result))
+	print("Perturbative bands:", ", ".join([bandtype_str(b[1]) for b in bands_lower] + [bandtype_str(b[1]) for b in bands_upper]))
+
+	## Print final result
+	if verbose:
+		floatfmt = '%g'
+		degfmt = '%g'
+		print("Final result:")
+		for i in range(0, na):
+			for j in range(0, na):
+				print("H_BHZ(%i, %i) =" % (i, j), h_bhz[i, j].kp_km_str(fmt = floatfmt, degfmt = degfmt))
+		print()
+		print("Hermiticity check:")
+		h_bhz_imag = h_bhz - h_bhz.conjugate()
+		h_bhz_imag_max = np.amax([np.amax(np.abs(h_bhz_imag[o])) for o in h_bhz_imag.opsum])
+		herm_result = "OK" if h_bhz_imag_max < 1e-9 else "Failed"
+		print("%s (delta = %g)" % (herm_result, h_bhz_imag_max))
+
+	if na == 4:
+		bhzparam = bhz_param(h_bhz, params.magn, verbose)
+		return bandtypes_result, bhzparam, h_bhz
+	else:
+		return bandtypes_result, [], h_bhz
diff --git a/kdotpy-v1.0.0/src/kdotpy/bhzprint.py b/kdotpy-v1.0.0/src/kdotpy/bhzprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..f68bfd63c96bb84be305a445a436b2afd29d48ce
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/bhzprint.py
@@ -0,0 +1,873 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import subprocess as subp
+import os
+import os.path
+import sys
+from .physparams import eoverhbar, muB
+from .config import get_config_bool
+from .momentum import Vector
+from .symbolic import polar
+
+### HELPER FUNCTIONS FOR BHZ OUTPUT ###
+
+### PLAIN TEXT OUTPUT ###
def print_sin(coeff, var = "theta"):
	"""Return the string ' sin(coeff var)' for the given coefficient.

	Special cases: a zero coefficient yields ' * 0', and coefficients of
	+1 / -1 omit the explicit factor, e.g. ' sin(theta)' / ' sin(-theta)'.
	"""
	special = {0: " * 0", 1: " sin(%s)" % var, -1: " sin(-%s)" % var}
	if coeff in special:
		return special[coeff]
	return " sin(%s %s)" % (coeff, var)
+
def print_cos(coeff, var = "theta"):
	"""Return the string ' cos(coeff var)' for the given coefficient.

	Special cases: a zero coefficient yields the empty string, and
	coefficients of +1 / -1 omit the explicit factor, e.g. ' cos(theta)'.
	"""
	special = {0: "", 1: " cos(%s)" % var, -1: " cos(-%s)" % var}
	if coeff in special:
		return special[coeff]
	return " cos(%s %s)" % (coeff, var)
+
def print_coeff(z, thr = 1e-4, var = "k"):
	"""Format matrix coefficient

	Arguments:
	z     Numpy array containing coefficients in one of two following ways.
	      1-dim: [z_0, z_1, z_2, ...] yields 'z_0 + z_1 k + z_2 k^2 + ...'
	      2-dim: [[c_00], [c_11, s_11], [c_22, s_22, c_20], ...] yields
	        'c_00 + c_11 k cos(theta) + s_11 k sin(theta)
	         + c_22 k^2 cos(2 theta) + s_22 k^2 sin(2 theta) + c_20 k^2 + ...'
	thr   Float. Threshold below which an element is considered zero.
	var   String. NOT USED (yet?)

	Returns:
	String.
	"""
	z = np.asarray(z)
	terms = []
	if z.ndim == 1:
		# Polynomial in k; terms are emitted from the highest power down to
		# k^2, then the linear and constant terms.
		nord = z.shape[0]
		for o in range(nord - 1, 1, -1):
			if abs(z[o]) > thr:
				terms.append(polar(z[o]) + " k^%i" % o)
		# NOTE(review): assumes at least two entries (orders 0 and 1); a
		# length-1 input would raise IndexError here — confirm with callers.
		if abs(z[1]) > thr:
			terms.append(polar(z[1]) + " k")
		if abs(z[0]) > thr:
			terms.append(polar(z[0]))
	elif z.ndim == 2:
		# Angular expansion: row o holds the order-o coefficients, with cos
		# and sin harmonics of theta interleaved (even/odd column index).
		nord = z.shape[0]
		for o in range(nord - 1, 1, -1):
			for oo in range(0, o + 1, 2):  # cos terms
				if abs(z[o, oo]) > thr:
					terms.append(polar(z[o, oo]) + " k^%i%s" % (o, print_cos(o - oo)))
			for oo in range(1, o + 1, 2):  # sin terms
				# NOTE(review): the sin harmonic uses (o - oo + 1), unlike the
				# cos branch's (o - oo) — verify this asymmetry is intended.
				if abs(z[o, oo]) > thr:
					terms.append(polar(z[o, oo]) + " k^%i%s" % (o, print_sin(o - oo + 1)))

		if abs(z[1, 0]) > thr:
			terms.append(polar(z[1, 0]) + " k cos(theta)")
		if abs(z[1, 1]) > thr:
			terms.append(polar(z[1, 1]) + " k sin(theta)")
		if abs(z[0, 0]) > thr:
			terms.append(polar(z[0, 0]))
	# Arrays of any other dimension fall through and yield "0".
	if len(terms) == 0:
		return "0"
	return " + ".join(terms)
+
def print_bhz_matrix(fitp, print_zeros = True):
	"""Print entries of a BHZ-like matrix

	fitp         Numpy array. The fit parameters. See print_coeff() for more
	             details.
	print_zeros  True or False. If False, do not print zero entries.

	No return value.
	"""
	fitp = np.asarray(fitp)
	dim = fitp.shape[0]
	# Only the upper triangle (j >= i) is printed.
	for i in range(dim):
		for j in range(i, dim):
			entry = print_coeff(fitp[i, j])
			if not print_zeros and entry in ("", "0"):
				continue
			print("(%2i, %2i): %s" % (i, j, entry))
	return
+
+### TEX OUTPUT ###
def tex_polar(z, v = None, fmt = "%.3f"):
	"""Format a complex number in polar (exponential) representation.

	Arguments:
	z     Complex number.
	v     String or None. If set, it replaces the string for |z|.
	fmt   String. Formatting string for float values.

	Returns:
	String.
	"""
	magnitude = abs(z)
	phase = np.angle(z)
	if magnitude < 1e-10:
		return "0"

	valstr = (fmt % magnitude) if v is None else v

	# Phases (numerically) equal to 0, +/-pi, or +/-pi/2 are rendered as
	# +/-1 or +/-i prefactors; anything else gets an explicit phase factor.
	if abs(phase) < 1e-3:
		return valstr
	if min(abs(phase - np.pi), abs(phase + np.pi)) < 1e-3:
		return "-" + valstr
	if abs(phase - 0.5 * np.pi) < 1e-3:
		return valstr + "\\ii"
	if abs(phase + 0.5 * np.pi) < 1e-3:
		return "-" + valstr + "\\ii"
	return valstr + " \\ee^{" + (fmt % (phase / np.pi)) + "\\pi}"
+
def k_replace(s, to = None):
	"""Replace the momentum symbol k by a shifted symbol (default \\tilde{k}).

	Only the patterns 'k^', 'k_-', and 'k_+' are substituted; any other
	occurrence of the letter k is left untouched.
	"""
	if to is None:
		to = r'\tilde{k}'
	result = s
	for suffix in ('^', '_-', '_+'):
		result = result.replace('k' + suffix, to + suffix)
	return result
+
def tex_splitterms(s_in, n, newline = ' \\nonumber\\\\\n  &'):
	"""Split TeX output into terms, preventing extremely long lines in the TeX source.

	Arguments:
	s_in       String. The input.
	n          Integer. Target maximum line length.
	newline    String. TeX string for a newline.
	"""
	terms = 0  # count of top-level +/- signs since the last fold
	depth = 0  # curly-bracket nesting level; signs inside braces don't count
	out = []
	for ch in s_in:
		if depth == 0 and ch in '+-':
			if terms >= n:
				out.append(newline)
				terms = 0
			else:
				terms += 1
		elif ch == '{':
			depth += 1
		elif ch == '}':
			depth -= 1
		out.append(ch)
	return "".join(out)
+
def tex_basis_state(lb):
	"""Format a basis state label as a TeX ket.

	Numeric labels (e.g. '3', '-1') become '\\ket{+3}' / '\\ket{-1}'; any
	other label such as 'E1+' is split into type (first character), index
	(middle), and sign (last character), yielding '\\ket{\\mathrm{E}1,+}'.
	"""
	try:
		i = int(lb)
	except (ValueError, TypeError):
		# Narrowed from a bare 'except:', which would also have swallowed
		# SystemExit / KeyboardInterrupt.
		return "\\ket{\\mathrm{%s}%s,%s}" % (lb[0], lb[1:-1], lb[-1])
	else:
		return ("\\ket{+%i}" % i) if i > 0 else ("\\ket{%i}" % i)
+
def tex_bhz_matrix_fourband(hsym, thr = 1e-2):
	"""Format BHZ matrix elements and coefficients as TeX output. Standard four-band representation

	Arguments:
	hsym         SymbolicMatrix instance. The symbolic matrix that encodes the
	             BHZ-like Hamiltonian. This is the result of do_bhz().
	thr          Float. Minimal magnitude for a coefficient to be treated as
	             nonzero.

	Returns:
	tex_matrix   List of lists of strings. The TeX strings of the matrix
	             elements.
	tex_coeff    Dict instance. The keys are the TeX strings of the
	             coefficients, the values their numerical values.

	Falls back to tex_bhz_matrix_generic() whenever the matrix cannot be
	brought into the standard A, B, C, D, M form.
	"""
	g_factor = get_config_bool('bhz_gfactor')
	dim = hsym.dim
	if dim != 4:
		sys.stderr.write("Warning (tex_bhz_matrix_fourband): Hamiltonian does not have a four-band basis. Use the generic output format instead.\n")
		return tex_bhz_matrix_generic(hsym, thr = thr)
	tex_matrix = [["0" for j in range(0, dim)] for i in range(0, dim)]
	tex_coeff = dict()

	# A: linear (k_+ / k_-) couplings at positions (0,1) and (2,3), i.e.
	# within each spin block. Each must be a single k_+ or k_- monomial.
	opsum01 = hsym[0, 1].leadingorder(1e-7).opsum
	opsum23 = hsym[2, 3].leadingorder(1e-7).opsum
	a = [0.0, 0.0]
	a_sign = ["", ""]
	if len(opsum01) == 1 and ('+' in opsum01 or '-' in opsum01):
		a[0] = list(opsum01.values())[0]
		a_sign[0] = list(opsum01.keys())[0]
	if len(opsum23) == 1 and ('+' in opsum23 or '-' in opsum23):
		a[1] = list(opsum23.values())[0]
		a_sign[1] = list(opsum23.keys())[0]
	if "" in a_sign:  # either one failed
		sys.stderr.write("Warning (tex_bhz_matrix_fourband): Matrix cannot be brought to standard 4x4 form (incompatible term at position |E1+> <H1+|).\n")
		return tex_bhz_matrix_generic(hsym, thr = thr)

	# Combine the two blocks into a single real or purely imaginary A if the
	# values (anti)match within the threshold; otherwise keep A_1 and A_2.
	if abs(np.imag(a[0])) < 1e-7 and abs(np.imag(a[1])) < 1e-7 and abs(a[0] - a[1]) < 2 * thr:
		tex_matrix[0][1] = "A k_%s" % a_sign[0]
		tex_matrix[1][0] = "A k_%s" % ('-' if a_sign[0] == '+' else '+')
		tex_matrix[2][3] = "A k_%s" % a_sign[1]
		tex_matrix[3][2] = "A k_%s" % ('-' if a_sign[1] == '+' else '+')
		tex_coeff['A'] = (a[0] + a[1]) / 2
	elif abs(np.imag(a[0])) < 1e-7 and abs(np.imag(a[1])) < 1e-7 and abs(a[0] + a[1]) < 2 * thr:
		tex_matrix[0][1] = "A k_%s" % a_sign[0]
		tex_matrix[1][0] = "A k_%s" % ('-' if a_sign[0] == '+' else '+')
		tex_matrix[2][3] = "-A k_%s" % a_sign[1]
		tex_matrix[3][2] = "-A k_%s" % ('-' if a_sign[1] == '+' else '+')
		tex_coeff['A'] = (a[0] - a[1]) / 2
	elif abs(np.real(a[0])) < 1e-7 and abs(np.real(a[1])) < 1e-7 and abs(np.imag(a[0]) - np.imag(a[1])) < 2 * thr:
		# Purely imaginary A; pm/mp carry the sign for element and conjugate.
		pm = '' if np.imag(a[0]) + np.imag(a[1]) > 0 else '-'
		mp = '-' if np.imag(a[0]) + np.imag(a[1]) > 0 else ''
		tex_matrix[0][1] = "%s\\ii A k_%s" % (pm, a_sign[0])
		tex_matrix[1][0] = "%s\\ii A k_%s" % (mp, '-' if a_sign[0] == '+' else '+')
		tex_matrix[2][3] = "%s\\ii A k_%s" % (pm, a_sign[1])
		tex_matrix[3][2] = "%s\\ii A k_%s" % (mp, '-' if a_sign[1] == '+' else '+')
		tex_coeff['A'] = abs(np.imag(a[0]) + np.imag(a[1])) / 2
	elif abs(np.real(a[0])) < 1e-7 and abs(np.real(a[1])) < 1e-7 and abs(np.imag(a[0]) + np.imag(a[1])) < 2 * thr:
		pm = '' if np.imag(a[0]) - np.imag(a[1]) > 0 else '-'
		mp = '-' if np.imag(a[0]) - np.imag(a[1]) > 0 else ''
		tex_matrix[0][1] = "%s\\ii A k_%s" % (pm, a_sign[0])
		tex_matrix[1][0] = "%s\\ii A k_%s" % (mp, '-' if a_sign[0] == '+' else '+')
		tex_matrix[2][3] = "%s\\ii A k_%s" % (mp, a_sign[1])
		tex_matrix[3][2] = "%s\\ii A k_%s" % (pm, '-' if a_sign[1] == '+' else '+')
		tex_coeff['A'] = abs(np.imag(a[0]) - np.imag(a[1])) / 2
	else:
		tex_matrix[0][1] = "A_1 k_%s" % a_sign[0]
		tex_matrix[1][0] = "A_1 k_%s" % ('-' if a_sign[0] == '+' else '+')
		tex_matrix[2][3] = "A_2 k_%s" % a_sign[1]
		tex_matrix[3][2] = "A_2 k_%s" % ('-' if a_sign[1] == '+' else '+')
		tex_coeff['A_1'] = a[0]
		tex_coeff['A_2'] = a[1]

	# C and M: mean and half-difference of the constant diagonal energies.
	diag_e = [hsym[i, i].opsum.get('', 0.0) for i in range(0, 4)]
	c = [(diag_e[0] + diag_e[1]) / 2, (diag_e[2] + diag_e[3]) / 2]
	m = [(diag_e[0] - diag_e[1]) / 2, (diag_e[2] - diag_e[3]) / 2]
	if abs(c[0] - c[1]) < 2 * thr:
		tex_matrix[0][0] = "C"
		tex_matrix[1][1] = "C"
		tex_matrix[2][2] = "C"
		tex_matrix[3][3] = "C"
		tex_coeff['C'] = (c[0] + c[1]) / 2
	else:
		tex_matrix[0][0] = "C_1"
		tex_matrix[1][1] = "C_1"
		tex_matrix[2][2] = "C_2"
		tex_matrix[3][3] = "C_2"
		tex_coeff['C_1'] = c[0]
		tex_coeff['C_2'] = c[1]
	if abs(m[0] - m[1]) < 2 * thr:
		tex_matrix[0][0] += " + M"
		tex_matrix[1][1] += " - M"
		tex_matrix[2][2] += " + M"
		tex_matrix[3][3] += " - M"
		tex_coeff['M'] = (m[0] + m[1]) / 2
	else:
		tex_matrix[0][0] += " + M_1"
		tex_matrix[1][1] += " - M_1"
		tex_matrix[2][2] += " + M_2"
		tex_matrix[3][3] += " - M_2"
		tex_coeff['M_1'] = m[0]
		tex_coeff['M_2'] = m[1]

	# B and D: quadratic diagonal terms from the symmetric '+-'/'-+'
	# combinations; the opening "(B" is closed by the "D) k^2" block below.
	diag_bb = [hsym[i, i].opsum.get('+-', 0.0) + hsym[i, i].opsum.get('-+', 0.0) for i in range(0, 4)]
	b = [(diag_bb[1] - diag_bb[0]) / 2, (diag_bb[3] - diag_bb[2]) / 2]
	d = [-(diag_bb[1] + diag_bb[0]) / 2, -(diag_bb[3] + diag_bb[2]) / 2]
	if abs(b[0] - b[1]) < 2 * thr:
		tex_matrix[0][0] += " - (B"
		tex_matrix[1][1] += " + (B"
		tex_matrix[2][2] += " - (B"
		tex_matrix[3][3] += " + (B"
		tex_coeff['B'] = (b[0] + b[1]) / 2
	else:
		tex_matrix[0][0] += " - (B_1"
		tex_matrix[1][1] += " + (B_1"
		tex_matrix[2][2] += " - (B_2"
		tex_matrix[3][3] += " + (B_2"
		tex_coeff['B_1'] = b[0]
		tex_coeff['B_2'] = b[1]
	if abs(d[0] - d[1]) < 2 * thr:
		tex_matrix[0][0] += " + D) k^2"
		tex_matrix[1][1] += " - D) k^2"
		tex_matrix[2][2] += " + D) k^2"
		tex_matrix[3][3] += " - D) k^2"
		tex_coeff['D'] = (d[0] + d[1]) / 2
	else:
		tex_matrix[0][0] += " + D_1) k^2"
		tex_matrix[1][1] += " - D_1) k^2"
		tex_matrix[2][2] += " + D_2) k^2"
		tex_matrix[3][3] += " - D_2) k^2"
		tex_coeff['D_1'] = d[0]
		tex_coeff['D_2'] = d[1]

	# Magnetic-field terms: antisymmetric combination of the '+-' and '-+'
	# coefficients, scaled by e/hbar (and muB when printed as a g factor).
	g = [-hsym[i, i].opsum.get('+-', 0.0) + hsym[i, i].opsum.get('-+', 0.0) for i in range(0, 4)]
	if abs(g[0] + g[2]) < 2 * thr:
		tex_matrix[0][0] += " + %s \\mathcal{B}" % ("g_\\mathrm{E} \\muB" if g_factor else "G_\\mathrm{E}")
		tex_matrix[2][2] += " - %s \\mathcal{B}" % ("g_\\mathrm{E} \\muB" if g_factor else "G_\\mathrm{E}")
		if g_factor:
			tex_coeff['g_\\mathrm{E}'] = 0.5 * (g[0] - g[2]) * eoverhbar / muB
		else:
			tex_coeff['G_\\mathrm{E}'] = 0.5 * (g[0] - g[2]) * eoverhbar
	else:
		tex_matrix[0][0] += " + %s \\mathcal{B}" % ("g_{\\mathrm{E},1} \\muB" if g_factor else "G_{\\mathrm{E},1}")
		tex_matrix[2][2] += " - %s \\mathcal{B}" % ("g_{\\mathrm{E},2} \\muB" if g_factor else "G_{\\mathrm{E},2}")
		if g_factor:
			tex_coeff['g_{\\mathrm{E},1}'] = g[0] * eoverhbar / muB
			tex_coeff['g_{\\mathrm{E},2}'] = -g[2] * eoverhbar / muB
		else:
			tex_coeff['G_{\\mathrm{E},1}'] = g[0] * eoverhbar
			tex_coeff['G_{\\mathrm{E},2}'] = -g[2] * eoverhbar

	if abs(g[1] + g[3]) < 2 * thr:
		tex_matrix[1][1] += " + %s \\mathcal{B}" % ("g_\\mathrm{H} \\muB" if g_factor else "G_\\mathrm{H}")
		tex_matrix[3][3] += " - %s \\mathcal{B}" % ("g_\\mathrm{H} \\muB" if g_factor else "G_\\mathrm{H}")
		if g_factor:
			tex_coeff['g_\\mathrm{H}'] = 0.5 * (g[1] - g[3]) * eoverhbar / muB
		else:
			tex_coeff['G_\\mathrm{H}'] = 0.5 * (g[1] - g[3]) * eoverhbar
	else:
		tex_matrix[1][1] += " + %s \\mathcal{B}" % ("g_{\\mathrm{H},1} \\muB" if g_factor else "G_{\\mathrm{H},1}")
		tex_matrix[3][3] += " - %s \\mathcal{B}" % ("g_{\\mathrm{H},2} \\muB" if g_factor else "G_{\\mathrm{H},2}")
		if g_factor:
			tex_coeff['g_{\\mathrm{H},1}'] = g[1] * eoverhbar / muB
			tex_coeff['g_{\\mathrm{H},2}'] = -g[3] * eoverhbar / muB
		else:
			tex_coeff['G_{\\mathrm{H},1}'] = g[1] * eoverhbar
			tex_coeff['G_{\\mathrm{H},2}'] = -g[3] * eoverhbar

	## Non-block-diagonal terms
	# R terms: linear couplings between the two spin blocks (BIA/SIA-like).
	if not hsym[0, 2].iszero(thr):
		opsum = hsym[0, 2].leadingorder(1e-7).opsum
		if '+' in opsum and '-' not in opsum:
			tex_matrix[0][2] = 'R_\\mathrm{E} k_+'
			tex_matrix[2][0] = 'R_\\mathrm{E} k_-'
			tex_coeff['R_\\mathrm{E}'] = hsym[0, 2].leadingorder(1e-7).opsum['+']
		elif '-' in opsum and '+' not in opsum:
			tex_matrix[0][2] = 'R_\\mathrm{E} k_-'
			tex_matrix[2][0] = 'R_\\mathrm{E} k_+'
			tex_coeff['R_\\mathrm{E}'] = hsym[0, 2].leadingorder(1e-7).opsum['-']
		elif '+' in opsum and '-' in opsum:
			tex_matrix[0][2] = 'R_{\\mathrm{E},1} k_+ + R_{\\mathrm{E},2} k_-'
			tex_matrix[2][0] = 'R_{\\mathrm{E},1} k_- + R_{\\mathrm{E},2} k_+'
			tex_coeff['R_{\\mathrm{E},1}'] = hsym[0, 2].leadingorder(1e-7).opsum['+']
			tex_coeff['R_{\\mathrm{E},2}'] = hsym[0, 2].leadingorder(1e-7).opsum['-']
		else:
			sys.stderr.write("Warning (tex_bhz_matrix_fourband): Matrix cannot be brought to standard 4x4 form (incompatible term at position |E1+> <E1-|).\n")
			return tex_bhz_matrix_generic(hsym, thr = thr)
	if not hsym[1, 3].iszero(thr):
		opsum = hsym[1, 3].leadingorder(1e-7).opsum
		if '+' in opsum and '-' not in opsum:
			tex_matrix[1][3] = 'R_\\mathrm{H} k_+'
			tex_matrix[3][1] = 'R_\\mathrm{H} k_-'
			tex_coeff['R_\\mathrm{H}'] = hsym[1, 3].leadingorder(1e-7).opsum['+']
		elif '-' in opsum and '+' not in opsum:
			tex_matrix[1][3] = 'R_\\mathrm{H} k_-'
			tex_matrix[3][1] = 'R_\\mathrm{H} k_+'
			tex_coeff['R_\\mathrm{H}'] = hsym[1, 3].leadingorder(1e-7).opsum['-']
		elif '+' in opsum and '-' in opsum:
			tex_matrix[1][3] = 'R_{\\mathrm{H},1} k_+ + R_{\\mathrm{H},2} k_-'
			tex_matrix[3][1] = 'R_{\\mathrm{H},1} k_- + R_{\\mathrm{H},2} k_+'
			tex_coeff['R_{\\mathrm{H},1}'] = hsym[1, 3].leadingorder(1e-7).opsum['+']
			tex_coeff['R_{\\mathrm{H},2}'] = hsym[1, 3].leadingorder(1e-7).opsum['-']
		else:
			sys.stderr.write("Warning (tex_bhz_matrix_fourband): Matrix cannot be brought to standard 4x4 form (incompatible term at position |H1+> <H1-|).\n")
			return tex_bhz_matrix_generic(hsym, thr = thr)
	# F and Delta terms: quadratic couplings between the two spin blocks.
	if not (hsym[0, 3].iszero(thr) and hsym[1, 2].iszero(thr)):
		opsum03 = hsym[0, 3].leadingorder(1e-7).opsum
		opsum12 = hsym[1, 2].leadingorder(1e-7).opsum
		if (('++' in opsum03 or '--' in opsum03) and ('+-' in opsum03 or '-+' in opsum03)) or (('++' in opsum12 or '--' in opsum12) and ('+-' in opsum12 or '-+' in opsum12)):
			# mixed quadratic terms: do not try to fit in standard form
			sys.stderr.write("Warning (tex_bhz_matrix_fourband): Matrix cannot be brought to standard 4x4 form (incompatible terms at positions |E1+/-> <H1-/+|).\n")
			return tex_bhz_matrix_generic(hsym, thr = thr)
		elif '++' in opsum03 and '--' in opsum03 and '++' in opsum12 and '--' in opsum12:
			if abs(opsum03['++'] - opsum12['++']) < 2 * thr and abs(opsum03['--'] - opsum12['--']) < 2 * thr:
				tex_matrix[0][3] = 'F k_+^2 + F\' k_-^2'
				tex_matrix[1][2] = 'F k_+^2 + F\' k_-^2'
				tex_matrix[2][1] = 'F k_-^2 + F\' k_+^2'
				tex_matrix[3][0] = 'F k_-^2 + F\' k_+^2'
				tex_coeff['F'] = (opsum03['++'] + opsum12['++']) / 2
				tex_coeff['F\''] = (opsum03['--'] + opsum12['--']) / 2
				# TODO: Shouldn't there be a magnetic field term here?
			else:
				tex_matrix[0][3] = 'F_1 k_+^2 + F_1\' k_-^2'
				tex_matrix[1][2] = 'F_2 k_+^2 + F_2\' k_-^2'
				tex_matrix[2][1] = 'F_2 k_-^2 + F_2\' k_+^2'
				tex_matrix[3][0] = 'F_1 k_-^2 + F_1\' k_+^2'
				tex_coeff['F_1'] = opsum03['++']
				tex_coeff['F_2'] = opsum12['++']
				tex_coeff['F\'_1'] = opsum12['--']
				tex_coeff['F\'_2'] = opsum03['--']
		elif '+-' in opsum03 and '-+' in opsum03 and '+-' in opsum12 and '-+' in opsum12:
			if abs(opsum03['+-'] + opsum12['-+']) < 2 * thr and abs(opsum03['-+'] + opsum12['+-']) < 2 * thr:
				delta = (opsum03['+-'] + opsum03['-+'] + opsum12['-+'] + opsum12['+-']) / 2
				g = (-opsum03['+-'] + opsum03['-+'] - opsum12['-+'] + opsum12['+-']) / 2
				tex_matrix[0][3] = '\\Delta k^2 + %s \\mathcal{B}' % ("g_\\Delta \\muB" if g_factor else "G_\\Delta")
				tex_matrix[1][2] = '-\\Delta k^2 + %s \\mathcal{B}' % ("g_\\Delta \\muB" if g_factor else "G_\\Delta")
				tex_matrix[2][1] = '-\\Delta k^2 + %s \\mathcal{B}' % ("g_\\Delta \\muB" if g_factor else "G_\\Delta")
				tex_matrix[3][0] = '\\Delta k^2 + %s \\mathcal{B}' % ("g_\\Delta \\muB" if g_factor else "G_\\Delta")
				tex_coeff['\\Delta'] = delta
				if g_factor:
					tex_coeff['g_\\Delta'] = g * eoverhbar / muB
				else:
					tex_coeff['G_\\Delta'] = g * eoverhbar
			else:
				delta1 = opsum03['+-'] + opsum03['-+']
				delta2 = -opsum12['-+'] - opsum12['+-']
				g1 = -opsum03['+-'] + opsum03['-+']
				g2 = -opsum12['+-'] + opsum12['-+']
				tex_matrix[0][3] = '\\Delta_1 k^2 + %s \\mathcal{B}' % ("g_{\\Delta,1} \\muB" if g_factor else "G_{\\Delta,1}")
				tex_matrix[1][2] = '-\\Delta_2 k^2 - %s \\mathcal{B}' % ("g_{\\Delta,2} \\muB" if g_factor else "G_{\\Delta,2}")
				tex_matrix[2][1] = '-\\Delta_2 k^2 - %s \\mathcal{B}' % ("g_{\\Delta,2} \\muB" if g_factor else "G_{\\Delta,2}")
				tex_matrix[3][0] = '\\Delta_1 k^2 + %s \\mathcal{B}' % ("g_{\\Delta,1} \\muB" if g_factor else "G_{\\Delta,1}")
				tex_coeff['\\Delta_1'] = delta1
				tex_coeff['\\Delta_2'] = delta2
				if g_factor:
					tex_coeff['g_{\\Delta,1}'] = g1 * eoverhbar / muB
					tex_coeff['g_{\\Delta,2}'] = g2 * eoverhbar / muB
				else:
					tex_coeff['G_{\\Delta,1}'] = g1 * eoverhbar
					tex_coeff['G_{\\Delta,2}'] = g2 * eoverhbar
		else:
			sys.stderr.write("Warning (tex_bhz_matrix_fourband): Matrix cannot be brought to standard 4x4 form (incompatible terms at positions |E1+/-> <H1-/+|).\n")
			return tex_bhz_matrix_generic(hsym, thr = thr)
	return tex_matrix, tex_coeff
+
+
def tex_bhz_matrix_generic(hsym, thr = 1e-2):
	"""Format BHZ matrix elements and coefficients as TeX output. Generic function.

	Arguments:
	hsym         SymbolicMatrix instance. The symbolic matrix that encodes the
	             BHZ-like Hamiltonian. This is the result of do_bhz().
	thr          Float. Minimal magnitude for a coefficient to be treated as
	             nonzero.

	Returns:
	tex_matrix   List of lists of strings. The TeX strings of the matrix
	             elements.
	tex_coeff    Dict instance. The keys are the TeX strings of the
	             coefficients, the values their numerical values.
	"""
	g_factor = get_config_bool('bhz_gfactor')
	dim = hsym.dim
	tex_matrix = [["" for j in range(0, dim)] for i in range(0, dim)]
	tex_coeff = dict()
	for i in range(0, dim):
		for j in range(0, dim):
			if hsym[i, j].iszero(thr):
				tex_matrix[i][j] = "0"
				continue
			z = hsym[i, j].chop(thr).opsum
			if i == j:
				# Diagonal element: constant E_i, quadratic B_i k^2 plus a
				# magnetic term, and axial F_i (k_+^2 + k_-^2).
				if '' in z:
					tex_matrix[i][j] += 'E_{%i}' % (i+1)
					tex_coeff['E_{%i}' % (i+1)] = z['']
				if '+-' in z or '-+' in z:
					b =  z.get('+-', 0.0) + z.get('-+', 0.0)
					g = -z.get('+-', 0.0) + z.get('-+', 0.0)
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += 'B_{%i}k^2' % (i+1)
					tex_coeff['B_{%i}' % (i+1)] = b
					if g_factor:
						# Sign convention: upper half of the basis prints +g,
						# lower half -g; the sign is absorbed into the value.
						g_sign = 1 if i < dim / 2 else -1
						tex_matrix[i][j] += ' %s g_{%i}\\muB\\mathcal{B}' % ('+' if g_sign == 1 else '-', i+1)
						tex_coeff['g_{%i}' % (i+1)] = g * g_sign * eoverhbar / muB
					else:
						tex_matrix[i][j] += ' + G_{%i}\\mathcal{B}' % (i+1)
						tex_coeff['G_{%i}' % (i+1)] = g * eoverhbar
				if '++' in z or '--' in z:
					f = z.get('++', 0.0) + z.get('--', 0.0)
					# Separator guard added for consistency with the other
					# branches (previously an unconditional ' + ' prefix).
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += 'F_{%i}(k_+^2+k_-^2)' % (i+1)
					tex_coeff['F_{%i}' % (i+1)] = f / 2
					# Hermiticity requires equal k_+^2 and k_-^2 coefficients
					# on the diagonal. Fixed: the check previously indexed
					# z['++'] / z['--'] directly (KeyError if only one key is
					# present) and left fp unused.
					fp = z.get('++', 0.0) - z.get('--', 0.0)
					if abs(fp) > 1e-9:
						sys.stderr.write('Warning (tex_bhz_matrix_generic): Violation of hermiticity. Diagonal matrix element at (%i, %i) is not real.\n' % (i, j))
				for o in z:
					if len(o) >= 4:
						if len(tex_matrix[i][j]) > 1:
							tex_matrix[i][j] += " + "
						tex_matrix[i][j] += "\\mathcal{O}(k^4)"
						break
				if len(tex_matrix[i][j]) == 0:
					tex_matrix[i][j] = "0"
			elif i < j:
				# Upper triangle: constant C, linear A k_+/-, quadratic F
				# k_+/-^2 and B k^2 (+ magnetic) terms. The numerical
				# coefficients are stored here only; the lower triangle
				# merely mirrors the labels.
				if '' in z:
					tex_matrix[i][j] += 'C_{%i,%i}' % (i+1,j+1)
					tex_coeff['C_{%i,%i}' % (i+1,j+1)] = z['']
				# Fixed: condition previously tested "'-' in 'z'" (a string
				# literal, always False) instead of the opsum dict z.
				if len(tex_matrix[i][j]) > 1 and ('+' in z or '-' in z):
					tex_matrix[i][j] += " + "
				if '+' in z and '-' in z:
					tex_matrix[i][j] += 'A_{%i,%i}k_+ + A\'_{%i,%i}k_-' % (i+1, j+1, i+1, j+1)
					tex_coeff['A_{%i,%i}' % (i+1, j+1)] = z['+']
					tex_coeff['A\'_{%i,%i}' % (i+1, j+1)] = z['-']
				elif '+' in z:
					if abs(np.real(z['+'])) < 1e-7 and abs(np.imag(z['+'])) > thr:
						pm = '' if np.imag(z['+']) > 0 else '-'
						tex_matrix[i][j] += '%s\\ii A_{%i,%i}k_+' % (pm, i+1, j+1)
						tex_coeff['A_{%i,%i}' % (i+1, j+1)] = abs(np.imag(z['+']))
					else:
						tex_matrix[i][j] += 'A_{%i,%i}k_+' % (i+1, j+1)
						tex_coeff['A_{%i,%i}' % (i+1, j+1)] = z['+']
				elif '-' in z:
					if abs(np.real(z['-'])) < 1e-7 and abs(np.imag(z['-'])) > thr:
						pm = '' if np.imag(z['-']) > 0 else '-'
						tex_matrix[i][j] += '%s\\ii A_{%i,%i}k_-' % (pm, i+1, j+1)
						tex_coeff['A_{%i,%i}' % (i+1, j+1)] = abs(np.imag(z['-']))
					else:
						tex_matrix[i][j] += 'A_{%i,%i}k_-' % (i+1, j+1)
						tex_coeff['A_{%i,%i}' % (i+1, j+1)] = z['-']
				if '++' in z and '--' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += 'F_{%i,%i}k_+^2 + F\'_{%i,%i}k_-^2' % (i+1, j+1, i+1, j+1)
					tex_coeff['F_{%i,%i}' % (i+1, j+1)] = z['++']
					tex_coeff['F\'_{%i,%i}' % (i+1, j+1)] = z['--']
					# TODO: Shouldn't there be a magnetic field term here also?
				elif '++' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					# Fixed: was '=', which clobbered the terms built so far.
					tex_matrix[i][j] += 'F_{%i,%i}k_+^2' % (i+1, j+1)
					tex_coeff['F_{%i,%i}' % (i+1, j+1)] = z['++']
				elif '--' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					# Fixed: was '=', which clobbered the terms built so far.
					tex_matrix[i][j] += 'F_{%i,%i}k_-^2' % (i+1, j+1)
					tex_coeff['F_{%i,%i}' % (i+1, j+1)] = z['--']
				if '+-' in z or '-+' in z:
					b =  z.get('+-', 0.0) + z.get('-+', 0.0)
					g = -z.get('+-', 0.0) + z.get('-+', 0.0)
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += 'B_{%i,%i}k^2' % (i+1, j+1)
					tex_coeff['B_{%i,%i}' % (i+1, j+1)] = b
					if g_factor:
						# Fixed: the coefficient was stored under the key
						# 'G_...' and without the muB conversion, although the
						# matrix prints 'g_...\muB'.
						tex_matrix[i][j] += ' + g_{%i,%i}\\muB\\mathcal{B}' % (i+1, j+1)
						tex_coeff['g_{%i,%i}' % (i+1, j+1)] = g * eoverhbar / muB
					else:
						tex_matrix[i][j] += ' + G_{%i,%i}\\mathcal{B}' % (i+1, j+1)
						tex_coeff['G_{%i,%i}' % (i+1, j+1)] = g * eoverhbar
				for o in z:
					if len(o) >= 3:
						if len(tex_matrix[i][j]) > 1:
							tex_matrix[i][j] += " + "
						tex_matrix[i][j] += "\\mathcal{O}(k^3)"
						break
				if len(tex_matrix[i][j]) == 0:
					tex_matrix[i][j] = "0"
			elif i > j:
				# Lower triangle: print the hermitian conjugates of the upper
				# triangle labels; no new coefficients are stored.
				if '' in z:
					# Fixed: the reality test used z['-'] (possible KeyError)
					# instead of the constant term z[''].
					tex_matrix[i][j] += 'C_{%i,%i}' % (j+1,i+1) if abs(np.imag(z[''])) < 1e-7 else 'C^*_{%i,%i}' % (j+1,i+1)
				# Fixed: "'-' in 'z'" string-literal typo, as above.
				if len(tex_matrix[i][j]) > 1 and ('+' in z or '-' in z):
					tex_matrix[i][j] += " + "
				if '+' in z and '-' in z:
					tex_matrix[i][j] += ('A_{%i,%i}k_-' % (j+1, i+1)) if abs(np.imag(z['-'])) < 1e-7 else ('A^*_{%i,%i}k_-' % (j+1, i+1))
					# Fixed: the real branch printed 'A' although the k_+ term
					# here is the conjugate of the upper triangle's A' k_-.
					tex_matrix[i][j] += (' + A\'_{%i,%i}k_+' % (j+1, i+1)) if abs(np.imag(z['+'])) < 1e-7 else (' + A^{\\prime*}_{%i,%i}k_+' % (j+1, i+1))
				elif '+' in z:
					if abs(np.real(z['+'])) < 1e-7 and abs(np.imag(z['+'])) > thr:
						pm = '' if np.imag(z['+']) > 0 else '-'
						tex_matrix[i][j] += ('%s\\ii A_{%i,%i}k_+' % (pm, j+1, i+1))
					elif abs(np.imag(z['+'])) < 1e-7:
						tex_matrix[i][j] += ('A_{%i,%i}k_+' % (j+1, i+1))
					else:
						tex_matrix[i][j] += ('A^*_{%i,%i}k_+' % (j+1, i+1))
				elif '-' in z:
					if abs(np.real(z['-'])) < 1e-7 and abs(np.imag(z['-'])) > thr:
						pm = '' if np.imag(z['-']) > 0 else '-'
						tex_matrix[i][j] += ('%s\\ii A_{%i,%i}k_-' % (pm, j+1, i+1))
					elif abs(np.imag(z['-'])) < 1e-7:
						tex_matrix[i][j] += ('A_{%i,%i}k_-' % (j+1, i+1))
					else:
						tex_matrix[i][j] += ('A^*_{%i,%i}k_-' % (j+1, i+1))
				if '++' in z and '--' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += ('F_{%i,%i}k_-^2' % (j+1, i+1)) if abs(np.imag(z['--'])) < 1e-7 else ('F^*_{%i,%i}k_-^2' % (j+1, i+1))
					tex_matrix[i][j] += (' + F\'_{%i,%i}k_+^2' % (j+1, i+1)) if abs(np.imag(z['++'])) < 1e-7 else (' + F^{\\prime*}_{%i,%i}k_+^2' % (j+1, i+1))
				elif '++' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					# Fixed: was '=', which clobbered the terms built so far.
					tex_matrix[i][j] += ('F_{%i,%i}k_+^2' % (j+1, i+1)) if abs(np.imag(z['++'])) < 1e-7 else ('F^*_{%i,%i}k_+^2' % (j+1, i+1))
				elif '--' in z:
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					# Fixed: was '=', which clobbered the terms built so far.
					tex_matrix[i][j] += ('F_{%i,%i}k_-^2' % (j+1, i+1)) if abs(np.imag(z['--'])) < 1e-7 else ('F^*_{%i,%i}k_-^2' % (j+1, i+1))
				if '+-' in z or '-+' in z:
					# Fixed: was 'elif' chained to the F branches, so the B/G
					# term was dropped whenever an F term was present, unlike
					# the corresponding upper-triangle element.
					if len(tex_matrix[i][j]) > 1:
						tex_matrix[i][j] += " + "
					tex_matrix[i][j] += 'B_{%i,%i}k^2' % (j+1, i+1)
					if g_factor:
						tex_matrix[i][j] += ' + g_{%i,%i}\\muB\\mathcal{B}' % (j+1, i+1)
					else:
						tex_matrix[i][j] += ' + G_{%i,%i}\\mathcal{B}' % (j+1, i+1)
				for o in z:
					if len(o) >= 3:
						if len(tex_matrix[i][j]) > 1:
							tex_matrix[i][j] += " + "
						tex_matrix[i][j] += "\\mathcal{O}(k^3)"
						break
				if len(tex_matrix[i][j]) == 0:
					tex_matrix[i][j] = "0"
	return tex_matrix, tex_coeff
+
+def tex_print_bhz_matrix(filename, hsym, basis = None, thr = 1e-2, print_zeros = True, multicol = 4, run_latex = True, includeplot = None, k0 = None):
+	"""Format BHZ matrix as TeX output.
+
+	Arguments:
+	filename     String. Name of the output file.
+	hsym         SymbolicMatrix instance. The symbolic matrix that encodes the
+	             BHZ-like Hamiltonian. This is the result of do_bhz().
+	basis        List of strings or None. Labels of the basis elements. This is
+	             one of the return values of do_bhz().
+	thr          Float. Minimal magnitude for a coefficient to be treated as
+	             nonzero.
+	print_zeros  True or False. If True, explicitly print zero coefficients too.
+	multicol     Integer or None. Number of columns in the coefficient list.
+	             This is an argument for the LaTeX environment 'multicols' of
+	             the multicol package. If set to None, 0, or 1, do not use
+	             'multicols'.
+	run_latex    True or False. If True, call PDFLaTeX to compile the TeX file.
+	             If False, do not do so. For this option to work, the command
+	             'pdflatex' must be set up on the system properly; otherwise an
+	             error message will be shown.
+	includeplot  String or None. If set, the filename of the plot that should be
+	             included into the TeX document. This is typically the k.p
+	             dispersion result with the BHZ-like dispersion as overlay. If
+	             None, do not include a plot.
+	k0           Vector, float, or None. If not None, replace k by \\tilde{k}
+	             and indicate the anchor point.
+
+	No return value.
+	"""
+	# preamble
+	s = "\\documentclass[a4paper,11pt]{article}\n"
+	s += "\\usepackage[a4paper,margin=25mm,landscape]{geometry}\n"
+	s += "\\usepackage{amsmath}\n"
+	if multicol is not None and multicol > 1:
+		s += "\\usepackage{multicol}\n"
+	if includeplot is not None:
+		s += "\\usepackage{graphicx}\n"
+		s += "  \\graphicspath{{/}}\n"
+	s += "\n"
+	s += "\\newcommand{\\ii} {\\mathrm{i}}\n"
+	s += "\\newcommand{\\ee} {\\mathrm{e}}\n"
+	s += "\\newcommand{\\muB} {\\mu_\\mathrm{B}}\n"
+	s += "\\newcommand{\\ket}[1] {\\lvert #1 \\rangle}\n"
+	s += "\\newcommand{\\nm} {\\,\\mathrm{nm}}\n"
+	s += "\\newcommand{\\meV} {\\,\\mathrm{meV}}\n"
+	s += "\\newcommand{\\meVnm} {\\,\\mathrm{meV}\\,\\mathrm{nm}}\n"
+	s += "\\newcommand{\\meVnmnm} {\\,\\mathrm{meV}\\,\\mathrm{nm}^2}\n"
+	s += "\\newcommand{\\meVT} {\\,\\mathrm{meV}/\\mathrm{T}}\n"
+	s += "\n"
+	s += "\\title{BHZ model}\n"
+	s += "\\author{}\n"
+	s += "\\date{\\today}\n"
+	s += "\n"
+	# actual document
+	s += "\\begin{document}\n"
+	s += "%\\maketitle\n"
+	s += "\\section{BHZ model}\n"
+
+	if basis is not None and basis != []:
+		s += "\\noindent Basis: \n$"
+		s += ", ".join([tex_basis_state(b) for b in basis])
+		s += "$\n\n\\bigskip"
+
+	## For nonzero k0, the default is to express the Hamiltonian in terms of
+	## k-tilde = k - k0. The configuration value may be set to False to express
+	## the values in terms of k itself.
+	print_ktilde = get_config_bool('bhz_ktilde')
+	if k0 is not None and k0 != 0.0:
+		if isinstance(k0, Vector):
+			k0x, k0y = k0.xy()
+		elif isinstance(k0, (float, int)):
+			k0x, k0y = k0, 0.0
+		else:
+			raise TypeError("Invalid type for argument k0")
+		if not print_ktilde:
+			hsym = hsym.shift((-k0x, -k0y))
+
+	dim = hsym.dim
+
+	## Get matrix elements
+	if dim == 4 and get_config_bool('bhz_abcdm'):
+		tex_matrix, tex_coeff = tex_bhz_matrix_fourband(hsym, thr = thr)
+	else:
+		tex_matrix, tex_coeff = tex_bhz_matrix_generic(hsym, thr = thr)
+	mat_elmnt = ''
+
+	if print_ktilde and k0 is not None and k0 != 0.0:
+		for i in range(0, dim):
+			for j in range(0, dim):
+				tex_matrix[i][j] = k_replace(tex_matrix[i][j])
+		s += "\\noindent Shifted momentum operators: $\\tilde{\\mathbf{k}} \\equiv \\mathbf{k} - \\mathbf{k}^0$,\n"
+		s += "where $\\mathbf{k}^0 = (%g, %g)\\nm^{-1}$." % (k0x, k0y)
+		s += "\n\n\\bigskip"
+
+	s += '\\noindent Hamiltonian:\n'
+	if dim > 10:  # this is a limit set by the AMSmath package
+		s += '\\setcounter{MaxMatrixCols}{%i}    %% Column limit of amsmath package\n' % dim
+	s += '\\begin{equation}\\label{eq:bhzham}\n'
+	s += '\\begin{pmatrix}\n'
+	for i in range(0, dim):
+		for j in range(0, dim):
+			s += '  '
+			if j > 0:
+				s += '& '
+			if j < i and len(tex_matrix[j][i]) > 280 // dim:
+				s += "H_{%i,%i}^*" % (j+1, i+1)
+			elif j < i and tex_matrix[j][i] == "0":
+				s += "0"
+			elif len(tex_matrix[i][j]) > 280 // dim:
+				s += "H_{%i,%i}" % (i+1, j+1)
+				mat_elmnt += ("H_{%i,%i} &= " % (i+1, j+1)) + tex_matrix[i][j] + r',\\' + '\n'
+			else:
+				s += tex_matrix[i][j]
+			s += '\n'
+		if i < dim - 1:
+			s += r'\\'
+			s += '\n'
+	s += "\\end{pmatrix},\n"
+	s += "\\end{equation}\n"
+
+	if len(mat_elmnt) > 0:
+		s += '\\begin{align}\n'
+		s += mat_elmnt.strip("\\\n")  # tex_splitterms(mat_elmnt,4)
+		s += '\n\\end{align}\n'
+
+	if multicol is not None and multicol > 1:
+		s += "\\begin{multicols}{%i}[\\noindent with:]\n" % multicol
+		s += "\\noindent\n"
+	else:
+		s += r"\noindent with:\\" + "\n"
+
+	unit = {'A': '\\meVnm', 'B': '\\meVnmnm', 'C': '\\meV', 'D': '\\meVnmnm', 'E': '\\meV', 'F': '\\meVnmnm', 'G': '\\meVT', 'M': '\\meV', 'R': '\\meVnm'}
+	default_fmt = "%.0f"
+	fmt = {'A': '%.0f', 'B': '%.0f', 'C': '%.2f', 'D': '%.0f', 'E': '%.2f', 'F': '%.0f', 'G': '%.3f', 'g': '%.2f', 'M': '%.2f', 'R': '%.2f'}
+	for j, coeff in enumerate(sorted(tex_coeff)):
+		u = ''
+		for u1 in unit:
+			if u1 in coeff.split('_')[0]:  # do not consider subscripts
+				u = unit[u1]
+				break
+		ff = default_fmt
+		for f1 in fmt:
+			if f1 in coeff.split('_')[0]:  # do not consider subscripts
+				ff = fmt[f1]
+				break
+		if abs(np.imag(tex_coeff[coeff])) < 1e-7:
+			s += "  $" + coeff + " = " + (ff % np.real(tex_coeff[coeff])) + u
+		else:
+			valstr = polar(tex_coeff[coeff], ff, '%.0f').replace('exp(1.j *', '\\,\\ee^{\\ii\\times').replace('j', '\\ii').replace('deg)', '^\\circ}')
+			if 'verbose' in sys.argv:
+				print(polar(tex_coeff[coeff], ff, '%.0f'))
+				print(valstr)
+			s += "  $" + coeff + " = " + valstr + u
+		if j == len(tex_coeff) - 1:
+			s += r'$.\\' + '\n'
+		elif j == len(tex_coeff) - 2:
+			s += r'$, and\\' + '\n'
+		else:
+			s += r"$,\\" + "\n"
+
+	if multicol is not None and multicol > 1:
+		s += "\\end{multicols}\n"
+
+	# Warning for g / G factors
+	for coeff in tex_coeff:
+		if 'g' in coeff.split('_')[0]:  # do not consider subscripts
+			s += "\n\\noindent\\textsc{Note}: $g$ factors are orbital contributions only.\n"
+			break
+		if 'G' in coeff.split('_')[0]:
+			s += "\n\\noindent\\textsc{Note}: $G$ factors are orbital contributions only.\n"
+			break
+
+	if includeplot is not None:
+		s += "\n\\newpage\n\n"
+		s += "\\section{Dispersion}\n"
+		spl = includeplot.split('.')
+		if len(spl) > 2:
+			includeplot1 = "__".join(spl[:-1]) + "." + spl[-1]
+			try:
+				os.rename(includeplot, includeplot1)
+			except FileExistsError:  # Also overwrite the renamed file silently in Windows, if it exists.
+				os.remove(includeplot1)
+				os.rename(includeplot, includeplot1)
+			sys.stderr.write("Warning (tex_print_bhz_matrix): Renamed %s to %s\n" % (includeplot, includeplot1))
+		else:
+			includeplot1 = includeplot
+		s += "\\includegraphics[height=150mm]{%s}\n\n" % includeplot1
+
+	s += r"\end{document}"
+	s += '\n\n'
+
+	# LaTeX does not like file names with '.' in it
+	spl = filename.split('.')
+	if len(spl) > 2:
+		filename1 = "__".join(spl[:-1]) + "." + spl[-1]
+		sys.stderr.write("Warning (tex_print_bhz_matrix): Output file is %s instead of requested %s\n" % (filename1, filename))
+		filename = filename1
+
+	f = open(filename, "w")
+	f.write(s)
+	f.close()
+	if run_latex:
+		sys.stderr.write("Run 'pdflatex %s' ...\n" % filename)
+		f_stdout = open("pdflatex.log", 'w')
+		try:
+			subp.check_call(["pdflatex", "-interaction=batchmode", filename], stdout = f_stdout)
+		except OSError:
+			sys.stderr.write("PDFLaTeX is not available\n")
+		except:
+			sys.stderr.write("PDFLaTeX has failed; see pdflatex.log\n")
+		else:
+			sys.stderr.write("PDFLaTeX has completed successfully.\n")
+		f_stdout.close()
+
+	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/cmdargs/__init__.py b/kdotpy-v1.0.0/src/kdotpy/cmdargs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..597a912a7bac1dac092142390b4d4fc3b19a1d2e
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/cmdargs/__init__.py
@@ -0,0 +1,47 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .range import grid
+
+from .cmdargs import cpus, gpu_workers, threads
+from .cmdargs import params, options, plot_options, initialize_opts, outdir, outputid
+from .cmdargs import bandalign, bhz, erange, plotwf, resume_from, vectorvalues
+from .cmdargs import sysargv
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/cmdargs/cmdargs.py b/kdotpy-v1.0.0/src/kdotpy/cmdargs/cmdargs.py
new file mode 100644
index 0000000000000000000000000000000000000000..68aaf49df829a169ccf940b581a60cfe5b4fb9a0
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/cmdargs/cmdargs.py
@@ -0,0 +1,2184 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import os
+import os.path
+import sys
+import re
+import ast
+from datetime import datetime
+from multiprocessing import cpu_count as mp_cpu_count
+from .tools import isint, isfloat, ismaterial, from_pct, CmdArgs
+from . import range as cmdargsrange
+from ..physparams import PhysParams
+from ..materials import allMaterials
+from ..momentum import Vector, VectorGrid
+
+
+### INITIALIZATION ###
+
def initialize():
	"""Pre-mark a handful of common arguments as parsed.

	These arguments are handled elsewhere (or are guaranteed to be parsed
	later); marking them here lets them count as parsed even where importing
	this module at the point of use would be too cumbersome.
	"""
	for common_arg in ('verbose', 'showetf', 'monitoretf', 'tempout', 'keepeivecs'):
		sysargv.setparsed(common_arg)
	# 'config' takes one value; mark both the keyword and its value as parsed.
	for position, argument in enumerate(sysargv):
		if argument.lower() == 'config':
			sysargv.setparsed(position)
			sysargv.setparsednext(1)
+
# Initialize main list of arguments. The CmdArgs instance keeps track of which
# arguments have been parsed. Set a few very common arguments as parsed already.
# Note: this runs at import time, so sysargv reflects sys.argv as it was when
# this module was first imported.
sysargv = CmdArgs(sys.argv)
initialize()
+
+### BASIC GET-VALUE FUNCTIONS ###
+
+### SPECIAL PURPOSE ARGUMENT PARSERS ###
+## Are typically called only from within cmdargs.py
+
def vsurf():
	"""Parse command-line arguments for surface potential.

	Recognizes 'vsurf' (or 'vif') followed by a numerical amplitude, an
	optional second numerical value (default 2.0; presumably a length scale),
	and an optional keyword 'q'/'quadr'/'quadratic'.

	Returns:
	vsurf            Float. Surface potential amplitude (0.0 if absent).
	vsurf_l          Float. Second numerical value (default 2.0).
	vsurf_quadratic  True or False. Whether the quadratic keyword was given.

	Exits with an error if the value following the argument is not numeric.
	"""
	vsurf = 0.0
	vsurf_l = 2.0
	vsurf_quadratic = False
	val, arg = sysargv.getval(["vsurf", "vif"])
	if val is None:
		pass
	elif isfloat(val):
		vsurf = float(val)
		argi = sysargv.index(arg)
		# Optional second numerical value. Narrowed from a bare 'except':
		# only a missing argument (IndexError) or a non-numeric one
		# (ValueError) should be silently ignored here.
		try:
			vsurf_l = float(sysargv[argi+2])
		except (IndexError, ValueError):
			pass
		else:
			sysargv.setparsednext(1)
		# Optional keyword selecting the quadratic variant. IndexError if
		# absent; AttributeError guards against a non-string element.
		try:
			vsurf_quadratic = sysargv[argi+3].lower() in ['q', 'quadr', 'quadratic']
		except (IndexError, AttributeError):
			pass
		if vsurf_quadratic:
			sysargv.setparsednext(1)
	else:
		sys.stderr.write("ERROR (Main): Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	return vsurf, vsurf_l, vsurf_quadratic
+
def strain():
	"""Parse command-line arguments for strain.

	The argument 'strain' may be followed by up to three values, one per axis
	(x, y, z). Each value is either numeric (containing '%' or '.', or equal
	to '0') or '-' for an unspecified component. The values 'no', 'none',
	'off' disable strain.

	Returns:
	None if the argument is absent; the string 'none' if strain is disabled;
	a single relative strain value (as returned by from_pct) for the x axis;
	or a 3-tuple (x, y, z) where unspecified components are None.

	Exits with an error on invalid input.
	"""
	val, arg = sysargv.getval(['strain'])
	if arg is None or arg == "":
		return None

	# First argument
	if val is None:
		sys.stderr.write("ERROR: Absent or invalid value for argument \"%s\"\n" % arg)
		exit(1)
	if val.lower() in ["no", "none", "off"]:
		return 'none'
	if val == '-':
		rel_strain_x = None
	elif '%' in val or '.' in val or val == '0':
		rel_strain_x = from_pct(val)
	else:
		sys.stderr.write("ERROR: Absent or invalid value for argument \"%s\"\n" % arg)
		exit(1)

	# Second argument
	argi = sysargv.index(arg)
	if argi + 2 >= len(sysargv):
		return rel_strain_x
	val = sysargv[argi + 2]
	rel_strain_y = None
	if val in ["z", "001"]:
		# Legacy axis indication along z: ignored with a deprecation warning
		sys.stderr.write("Warning: Indication of strain axis as argument to 'strain' is deprecated, so the argument '%s' is ignored.\n" % val)
	elif val in ["x", "z", "001", "100", "no", "none", "off"]:
		# NOTE(review): 'z' and '001' are unreachable here (already caught by
		# the branch above); this branch triggers for 'x', '100', 'no',
		# 'none', 'off'.
		sys.stderr.write("ERROR: Indication of strain axis as argument to 'strain' is deprecated. For any other axis than 'z' or '001', use 'strain' with multiple numerical arguments.\n")
		exit(1)
	elif val == '-':
		sysargv.setparsed(argi + 2)
	elif '%' in val or '.' in val or val == '0':
		rel_strain_y = from_pct(val)
		sysargv.setparsed(argi + 2)
	else:
		# Not a strain value: do not consume this argument
		return rel_strain_x

	# Third argument
	if argi + 3 >= len(sysargv):
		return rel_strain_x if rel_strain_y is None else (rel_strain_x, rel_strain_y, None)
	val = sysargv[argi + 3]
	rel_strain_z = None
	if val == '-':
		sysargv.setparsed(argi + 3)
	elif '%' in val or '.' in val or val == '0':
		rel_strain_z = from_pct(val)
		sysargv.setparsed(argi + 3)
	if rel_strain_y is None and rel_strain_z is None:
		return rel_strain_x
	else:
		return (rel_strain_x, rel_strain_y, rel_strain_z)
+
def potential(arg):
	"""Parse command-line arguments for potential input files.

	Argument:
	arg   Container of lower-case strings. Command-line keywords that
	      introduce a potential file.

	Each matching keyword must be followed by the name of an openable file;
	subsequent arguments are consumed as long as they are numbers or names of
	openable files.

	Returns:
	potential_opts   List of file names (str) and numerical values (float),
	                 in command-line order.

	Exits with an error if the file right after a keyword cannot be opened.
	"""
	potential_opts = []
	argn = 0
	while argn + 1 < len(sysargv):
		if sysargv[argn].lower() in arg:
			sysargv.setparsed(argn)
			fn = sysargv[argn + 1]
			# Any failure to open (missing, unreadable) is treated as
			# nonexistent and is fatal for the first file name.
			try:
				pf = open(fn, "r")
			except:
				sys.stderr.write("ERROR (Main): Potential file \'%s\' does not exist.\n" % fn)
				exit(1)
			else:
				pf.close()
			potential_opts.append(fn)
			sysargv.setparsed(argn+1)
			argn += 2
			# Consume subsequent arguments that are numbers or openable files
			while argn < len(sysargv):
				val = sysargv[argn]
				if isfloat(val):
					potential_opts.append(float(val))
				else:
					try:
						pf = open(val, "r")
					except:
						# Not an openable file: stop consuming arguments
						break
					else:
						pf.close()
					potential_opts.append(val)
				sysargv.setparsed(argn)
				argn += 1
		else:
			argn += 1
	return potential_opts
+
def selfcon():
	"""Parse command-line arguments for self-consistent Hartree calculation.

	Recognizes 'selfcon' followed by up to two numerical values: an integer
	sets the maximum number of iterations, a float sets the accuracy. If the
	first value is an integer, a second number sets the accuracy; if the
	first is a float, a second integer sets the iteration count.

	Returns:
	selfcon_max_it   Integer or None. Maximum number of iterations.
	selfcon_acc      Float or None. Accuracy.
	"""
	val, arg = sysargv.getval(["selfcon"])
	selfcon_max_it = None
	selfcon_acc = None

	if isint(val):
		selfcon_max_it = int(val)
	elif isfloat(val):
		selfcon_acc = float(val)
	else:
		val = None
	if val is not None:
		# Look for an optional second numerical value. Narrowed from a bare
		# 'except': only a missing argument (IndexError) or arg not found in
		# the list (ValueError) should be silently ignored.
		try:
			argi = sysargv.index(arg)
			val = sysargv[argi+2]
		except (ValueError, IndexError):
			pass
		else:
			if isint(val):
				if selfcon_max_it is not None:
					selfcon_acc = float(val)
				else:
					selfcon_max_it = int(val)
				sysargv.setparsednext(1)
			elif isfloat(val):
				selfcon_acc = float(val)
				sysargv.setparsednext(1)
	return selfcon_max_it, selfcon_acc
+
+
def potential_bc():
	"""Parse command line arguments for custom boundary conditions
	for solving Poisson's equation.
	Three possible input formats:
	1. {'v1':10,'z1':15.}
	2. explicit 'v12[-30.0,30.]=10;v3[0.0]=0' or implicit 'dv[5.]=3;v[10.0]=0'
	3. 'v1=10;v2=5;z1=-30.0;z2=30.0'
	z1, z2, z3 must be given in nm (float).

	Returns:
	None if the argument is absent or its value is empty; otherwise a dict
	whose keys are restricted to
	['v1', 'dv1', 'v2', 'dv2', 'v12', 'v3', 'z1', 'z2', 'z3'].
	"""

	vals, _ = sysargv.getval(["potentialbc", "potbc"])

	if vals is None:
		return None

	if '{' in vals:
		# Input format 1: a dict literal, e.g. {'key1':value1,'key2':value2}
		try:
			bc = ast.literal_eval(vals)
		except ValueError:
			sys.stderr.write(
				'ERROR (potential_bc): Input format of potential boundary conditions is not compatible. '
				'Make sure that keys are interpreted as strings. (e.g. "{\'key\':value}")\n'
			)
			exit(1)
		if isinstance(bc, str):
			sys.stderr.write(
				'ERROR (potential_bc): Input format of potential boundary conditions could not be interpreted correctly. '
				'Make sure that keys are interpreted as strings. (e.g. "{\'key\':value}")\n'
			)
			exit(1)
	else:
		# Input format 2, 'v12[-30.0,30.]=10;v3[0.0]=0',
		# or format 3, 'v1=10;v2=5;z1=-30.0;z2=30.0'.
		# Both formats can be mixed.
		args = vals.split(';')
		if len(args[0]) == 0:  # empty string
			return None

		# Change order of args if 'v12' is set
		comma_check = ["," in arg for arg in args]  # Check if "," is in any string. This means 'v12' is set.
		v12_index = np.argmax(comma_check) if any(comma_check) else None
		if v12_index is not None:
			args.insert(0, args.pop(v12_index))  # move 'v12' to first position

		bc = {}
		arg_idx = 0  # counts bracket-style entries for implicit key numbering
		for arg in args:
			try:
				k, v = arg.split('=')
			except ValueError:
				sys.stderr.write(
					f"Warning (potential_bc): Ignoring input '{arg}'. Format not compatible with boundary conditions.\n"
				)
				continue

			if "[" in k:  # bracket style (format 2)
				arg_idx += 1
				keys, vals = [], []
				val1 = v  # potential value (right-hand side of '=')
				key1, val2 = k.replace("]", "").split("[")

				if "," in val2:  # two values in bracket -> must be z-coordinates for v12
					val2, val3 = val2.split(",")
					keys.append('z2')  # must always be z2
					vals.append(val3)
					arg_idx += 1
				else:
					val3 = None

				# Digits in the key name select explicit vs implicit numbering
				bc_idx = re.findall(r"\d+", key1)
				if len(bc_idx) > 0:
					# explicit input, e.g. 'v1[10.]=5'
					key2 = f"z{bc_idx[0]}" if val3 is None else 'z1'
				else:
					# implicit input, e.g. 'v[10.]=5'
					key1 += str(arg_idx) if val3 is None else '12'
					key2 = f"z{arg_idx}" if val3 is None else 'z1'

				if val3 is not None and key1 != 'v12':  # Final check for 'v12'
					sys.stderr.write(
						f"Warning (potential_bc): Variable name {key1} for boundary condition "
						f"is incompatible with input format. Renaming to 'v12'.\n"
					)
					key1 = 'v12'

				keys.extend([key2, key1])
				vals.extend([val2, val1])

			else:  # plain key=value (format 3)
				keys = [k]
				vals = [v]

			# Convert string values to int/float
			for key, val in zip(keys, vals):
				if key in bc:
					sys.stderr.write(f"Warning (potential_bc): The {key=} is given multiple times. Please check the input format. (Ignoring input {arg}.)\n")
					continue
				if isfloat(val):
					val = float(val)
				elif isinstance(val, str):
					pass
				else:
					sys.stderr.write(f"Warning (potential_bc): Unknown input format '{key}={val}'.\n")
					continue
				bc.update({key: val})

	# Discard keys that are not valid boundary-condition parameters
	deleted = []
	loop_bc = bc.copy()
	allowed_keys = ["v1", "dv1", "v2", "dv2", "v12", "v3", "z1", "z2", "z3"]
	for key in loop_bc:
		if key not in allowed_keys:
			del bc[key]
			deleted.append(key)

	if len(deleted) > 0:
		sys.stderr.write(f"Warning (potential_bc): Incompatible boundary conditions {deleted} are ignored. Choose from {allowed_keys}.\n")

	return bc
+
+
def depletion():
	"""Parse command-line arguments for depletion charge and depletion length.

	Returns:
	ndepl   None, float, or list of two floats. Depletion charge value(s).
	ldepl   None, float, or list of two elements (float or None). Depletion
	        length value(s); None stands for 'inf'.

	Exits with an error if a value following either argument is invalid.
	"""
	# depletion charge
	val, arg = sysargv.getval(["ndepletion", "ndep", "ndepl"])
	if val is None:
		ndepl = None
	elif isfloat(val):
		ndepl1 = float(val)
		argi = sysargv.index(arg)
		# Optional second numerical value
		try:
			ndepl2 = float(sysargv[argi+2])
		except:
			ndepl = ndepl1
		else:
			sysargv.setparsednext(1)
			ndepl = [ndepl1, ndepl2]
	else:
		sys.stderr.write("ERROR (Main): Invalid value for argument \"%s\"\n" % arg)
		exit(1)

	# depletion length (width)
	val, arg = sysargv.getval(["ldepletion", "ldep", "ldepl"])
	if val is None:
		ldepl = None
		return ndepl, ldepl
	elif val.lower() in ["inf", "-"]:
		# 'inf' or '-' means an unbounded (None) length
		ldepl1 = None
	elif isfloat(val):
		ldepl1 = float(val)
	else:
		sys.stderr.write("ERROR (Main): Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	# Optional second value for the depletion length
	argi = sysargv.index(arg)
	try:
		val2 = sysargv[argi+2]
	except:
		val2 = None
	else:
		sysargv.setparsednext(1)
	if val2 is None:
		ldepl = ldepl1
	elif val2.lower() in ["inf", "-"]:
		ldepl2 = None
		ldepl = [ldepl1, ldepl2]
	elif isfloat(val2):
		ldepl2 = float(val2)
		ldepl = [ldepl1, ldepl2]
	else:
		# Second value not recognized: keep the single value
		ldepl = ldepl1

	return ndepl, ldepl
+
def broadening(arg = None, allow_extra_val = True):
	"""Parse command-line arguments for (Landau-level) broadening.

	Arguments:
	arg              None, string, or list of strings. Command-line keywords
	                 to match; default is ['broadening', 'llbroadening'].
	allow_extra_val  True or False. Whether an extra numerical value (Berry
	                 broadening relative to DOS broadening) is permitted.

	Returns:
	broadening   List of 3-tuples (width, type, dep), one per matched
	             argument; missing type/dep are set to 'auto'.
	extra_val    None, or a 3-tuple (value, type, dep) with type and dep
	             taken from the single broadening parameter.

	Exits with an error on conflicting or incomplete input.
	"""
	broadening_widths = []
	broadening_types = []
	broadening_deps = []
	extra_val = None
	argn = 0
	parsed_arg = None
	if arg is None:
		arg = ["broadening", "llbroadening"]
	elif isinstance(arg, str):
		arg = [arg]
	while argn < len(sysargv):
		# Match argument keywords case-insensitively, ignoring underscores
		if not sysargv[argn].lower().replace('_', '') in arg:
			argn += 1
			continue
		sysargv.setparsed(argn)
		parsed_arg = sysargv[argn]
		argn += 1
		broadening_widths.append(None)
		broadening_types.append(None)
		broadening_deps.append(None)
		# Consume values following the keyword until an unrecognized one
		while argn < len(sysargv):
			val = sysargv[argn].lower()
			# Exponent notation for the dependence: '^p/q' (m1) or '^x' (m2)
			m1 = re.match(r"(\^|\*\*)([-+]?[0-9]+)/([0-9]+)", val)
			m2 = re.match(r"(\^|\*\*)([-+]?[0-9]+\.?[0-9]*)", val)
			if isfloat(val):
				if broadening_widths[-1] is None:
					broadening_widths[-1] = float(val)
				elif extra_val is None:
					extra_val = float(val)
					extra_val_arg = parsed_arg
				else:
					sys.stderr.write("ERROR: Broadening got multiple values for broadening width.\n")
					exit(1)
			# Broadening types
			elif val in ['fermi', 'logistic', 'sech', 'thermal', 'gauss', 'gaussian', 'normal', 'lorentz', 'lorentzian', 'step', 'delta']:
				if broadening_types[-1] is None:
					broadening_types[-1] = val
				else:
					sys.stderr.write("ERROR: Broadening got multiple values for broadening type.\n")
					exit(1)
			# Scaling types (dependence)
			elif val in ['auto', 'automatic', 'const', 'lin', 'linear', 'sqrt', 'cbrt']:
				if broadening_deps[-1] is None:
					broadening_deps[-1] = val
				else:
					sys.stderr.write("ERROR: Broadening got multiple values for broadening dependence.\n")
					exit(1)
			elif m1 is not None:
				if broadening_deps[-1] is None:
					broadening_deps[-1] = float(m1.group(2)) / float(m1.group(3))
				else:
					sys.stderr.write("ERROR: Broadening got multiple values for broadening dependence.\n")
					exit(1)
			elif m2 is not None:
				if broadening_deps[-1] is None:
					broadening_deps[-1] = float(m2.group(2))
				else:
					sys.stderr.write("ERROR: Broadening got multiple values for broadening dependence.\n")
					exit(1)
			else:
				# Berry fraction (only with one broadening argument)
				try:
					extra_val = from_pct(val) * broadening_widths[-1] if '%' in val else float(val)
					extra_val_arg = parsed_arg
				except:
					break
			sysargv.setparsed(argn)
			argn += 1

	# Validate and normalize the collected (width, type, dep) triplets
	broadening = []
	for bw, bt, bd in zip(broadening_widths, broadening_types, broadening_deps):
		if bw is None and bt is None:
			sys.stderr.write("ERROR: Broadening parameter without type and/or width.\n")
			exit(1)
		if bw is None and (bt is not None and bt not in ['thermal', 'step', 'delta']):
			sys.stderr.write("ERROR: Broadening width parameter missing for broadening type '%s'.\n" % ('auto' if bt is None else bt))
			exit(1)
		if (bw is not None and bw != 0.0) and bt in ['step', 'delta']:
			sys.stderr.write("Warning: Broadening width parameter is ignored for broadening type '%s'.\n" % bt)
			bw = 0.0
		broadening.append((bw, 'auto' if bt is None else bt, 'auto' if bd is None else bd))
	if extra_val is not None:
		if not allow_extra_val:
			sys.stderr.write("ERROR: Extra numerical value not permitted for argument %s.\n" % extra_val_arg)
			exit(1)
		if len(broadening_widths) > 1:
			sys.stderr.write("ERROR: Input of Berry broadening with respect to DOS broadening is permitted only in combination with a single broadening parameter.\n")
			exit(1)
		extra_val = (extra_val, broadening[0][1], broadening[0][2])

	return broadening, extra_val
+
def broadening_setopts(opts, s, broadening_val):
	"""Store the output of broadening() into the options dict opts.

	Arguments:
	opts            Dict instance; updated in place.
	s               String. Key prefix; the keys s + '_scale', s + '_type',
	                and s + '_dep' are set.
	broadening_val  A 3-tuple (width, type, dep) or a list of such tuples.
	                An empty list leaves opts unchanged.

	Returns:
	opts   The same dict instance.
	"""
	if isinstance(broadening_val, tuple):
		scale, btype, bdep = broadening_val
		opts[s + '_scale'] = scale
		opts[s + '_type'] = btype
		opts[s + '_dep'] = bdep
	elif len(broadening_val) == 1:
		scale, btype, bdep = broadening_val[0]
		opts[s + '_scale'] = scale
		opts[s + '_type'] = btype
		opts[s + '_dep'] = bdep
	elif len(broadening_val) > 1:
		opts[s + '_scale'] = [entry[0] for entry in broadening_val]
		opts[s + '_type'] = [entry[1] for entry in broadening_val]
		opts[s + '_dep'] = [entry[2] for entry in broadening_val]
	return opts
+
def efield():
	"""Parse command-line arguments for electric field.

	The argument 'efield' takes exactly two values: either two numbers, or a
	number combined with a side indicator ('t'/'top', 'b'/'btm'/'bottom') or
	a placeholder ('-'/'--').

	Returns:
	None if the argument is absent; otherwise a list of two elements, where
	the first is the bottom value and the second the top value; an
	unspecified element is None.

	Exits with an error if the two values do not match any accepted pattern.
	"""
	# electric field: expect two values after the keyword
	val, arg = sysargv.getval(["efield"], 2)
	if val is None:
		return None
	elif len(val) != 2:
		sys.stderr.write("ERROR: Argument \"%s\" should be followed by two additional arguments\n" % arg)
		exit(1)
	if isfloat(val[0]) and isfloat(val[1]):
		return [float(val[0]), float(val[1])]
	elif val[0].lower() in ['t', 'top'] and isfloat(val[1]):
		return [None, float(val[1])]
	elif val[0].lower() in ['b', 'btm', 'bottom'] and isfloat(val[1]):
		return [float(val[1]), None]
	elif val[1].lower() in ['t', 'top'] and isfloat(val[0]):
		return [None, float(val[0])]
	elif val[1].lower() in ['b', 'btm', 'bottom'] and isfloat(val[0]):
		return [float(val[0]), None]
	elif val[0] in ['-', '--'] and isfloat(val[1]):
		return [None, float(val[1])]
	elif val[1] in ['-', '--'] and isfloat(val[0]):
		return [float(val[0]), None]
	else:
		sys.stderr.write("ERROR: Argument \"%s\" should be followed by arguments in the following pattern (where ## is a numerical value):\n  \"%s ## ##\"\n  \"%s -- ##\", \"%s t ##\", \"%s top ##\", \"%s ## top\"\n  \"%s ## --\", \"%s b ##\", \"%s btm ##\", \"%s ## btm\"\n" % (arg, arg, arg, arg, arg, arg, arg, arg, arg, arg))
		exit(1)
+
def erange():
	"""Parse command-line arguments for energy range.

	Returns:
	A list [emin, emax] (floats, sorted); defaults to [-100.0, 100.0] if the
	argument is absent. If only one value is given, the range is symmetric
	around zero. Exits with an error on invalid input.
	"""
	val, arg = sysargv.getval("erange", 2)
	if val is None:
		# Argument absent: use the default energy range
		return [-100.0, 100.0]
	if not isinstance(val, list) or len(val) not in [1, 2]:
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	try:
		emin = float(val[0])
	except:
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	try:
		emax = float(val[1])
	except:
		# Single value given: mirror it around zero
		emax = -emin
	return [min(emin, emax), max(emin, emax)]
+
def transitions():
	"""Parse command-line arguments for optical transitions.

	The argument 'transitions' may be followed by up to three numerical
	values: two values forming an energy range and/or one value between 0 and
	1 (presumably an amplitude/occupancy threshold -- confirm against caller).

	Returns:
	True or False if the argument is absent or carries no numerical values;
	otherwise a two-element list [range, x], where range is None or a tuple
	(min, max) and x is True or a float between 0 and 1.

	Exits with an error on invalid input.
	"""
	val, arg = sysargv.getval("transitions", 3, mark = None)
	# NOTE(review): arg.lower() assumes getval() returns a string (possibly
	# empty) for arg even when the argument is absent; confirm in CmdArgs.
	if arg.lower() == 'transitions':
		sysargv.setparsed('transitions')
	if val is None:
		# NOTE(review): case-sensitive comparison here vs. .lower() above;
		# possibly intentional -- confirm.
		return arg == 'transitions'  # Return True if 'transitions' is given as the final argument
	if not isinstance(val, list):
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)

	if len(val) == 3 and isfloat(val[0]) and isfloat(val[1]) and isfloat(val[2]):
		x1 = float(val[0])
		x2 = float(val[1])
		x3 = float(val[2])
		sysargv.setparsednext(3)
		# The value inside [0, 1] is the threshold; the other two the range
		if (x3 < 0.0 or x3 > 1.0) and x1 >= 0.0 and x1 <= 1.0:
			return [(min(x2, x3), max(x2, x3)), x1]
		elif x3 >= 0.0 and x3 <= 1.0:
			return [(min(x1, x2), max(x1, x2)), x3]
	elif len(val) >= 2 and isfloat(val[0]) and isfloat(val[1]):
		x1 = float(val[0])
		x2 = float(val[1])
		sysargv.setparsednext(2)
		return [(min(x1, x2), max(x1, x2)), True]
	elif len(val) >= 1 and isfloat(val[0]):
		x1 = float(val[0])
		if x1 < 0.0 or x1 > 1.0:
			sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
			exit(1)
		sysargv.setparsednext(1)
		return [None, x1]
	elif len(val) >= 0:
		# Catch-all: keyword given without numerical values
		return True
	sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
	exit(1)
+
def cpus():
	"""Parse command-line arguments for number of processes for
	multiprocessing / parallel computation.

	Returns:
	num_cpus   Integer. Requested number of processes.
	max_cpus   Integer or None. Detected number of CPU cores, if available.
	"""
	num_cpus = 1
	try:
		max_cpus = mp_cpu_count()
	except:
		max_cpus = None
		sys.stderr.write("Warning (cmdargs.cpus): Cannot determine number of CPU cores\n")
	val, arg = sysargv.getval(["cpu", "cpus", "ncpu"])
	if val is None or val.lower() == 'max' or val.lower().startswith('auto'):
		# Automatic mode: use all available cores, or fall back to one
		if max_cpus is not None:
			num_cpus = max_cpus
		else:
			num_cpus = 1
			sys.stderr.write("Warning (cmdargs.cpus): Implicitly set to single CPU core, because default/maximum could not be determined.\n")
	elif isint(val) and int(val) > 0:
		num_cpus = int(val)
	else:
		sys.stderr.write("ERROR (cmdargs.cpus): Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	if max_cpus is not None and num_cpus > max_cpus:
		sys.stderr.write("Warning (cmdargs.cpus): Number of processes (cpus) is higher than the available number of cpus. This could lead to a significant performance degradation.\n")

	return num_cpus, max_cpus
+
def threads():
	"""Parse command-line arguments for number of threads per process for
	multithreading / parallel computation.

	Returns:
	None if the argument is absent; otherwise a positive integer. Exits with
	an error on an invalid value.
	"""
	val, arg = sysargv.getval(["threads", "nthreads"])
	if arg == "":
		return None
	if isint(val) and int(val) > 0:
		return int(val)
	sys.stderr.write("ERROR (cmdargs.threads): Invalid value for argument \"%s\"\n" % arg)
	exit(1)
+
def gpu_workers():
	"""Parse command-line arguments for number of gpu workers for
	multithreading / parallel computation.

	Returns:
	None if the argument is absent; otherwise a positive integer. Exits with
	an error on an invalid value.
	"""
	gpus = None
	val, arg = sysargv.getval(["gpu", "gpus", "ngpu"])
	if arg == "":
		pass
	elif isint(val):
		gpus = int(val)
		if gpus <= 0:
			# Fixed label: previously said 'cmdargs.threads' (copy-paste error)
			sys.stderr.write("ERROR (cmdargs.gpu_workers): Invalid value for argument \"%s\"\n" % arg)
			exit(1)
	else:
		sys.stderr.write("ERROR (cmdargs.gpu_workers): Invalid value for argument \"%s\"\n" % arg)
		exit(1)
	return gpus
+
+def vectorvalues(prefix = 'k', onedim = False, twodim = False, threedim = False, defaultaxis = 'x', magn_epsilon = None):
+	"""Get vector values
+
+	Arguments:
+	prefix        'k', 'b', etc. Quantity. This will affect the command-line
+	              arguments that match as well as the 'prefix' in the resulting
+	              Vectors/VectorGrid.
+	onedim, twodim, threedim    False or True. Whether a grid dimension of 1, 2,
+	                            and 3 dimensions, respectively, are accepted.
+	defaultaxis   'x', 'y', or 'z'. If a Vector with one component is input, how
+	              to interpret it. For example, treat 'k' as 'kx' or 'b' as
+	              'bz'.
+	magn_epsilon  Float. If positive, add points with these values to a range of
+	              magnetic field values, if the range contains zero. If
+	              negative, only add these numbers if the range contains both
+	              positive and negative values.
+
+	Note:
+	The dimensionality boolean values onedim and twodim should not be both
+	False, and twodim must be True if threedim is True, i.e., combinations of
+	dimensionalities allowd are 1, 1+2, 2, 2+3, 1+2+3.
+
+	Exits on error.
+
+	Returns:
+	VectorGrid
+	"""
+	if not (onedim or twodim):
+		sys.stderr.write("ERROR (cmdargs.vectorvalues): onedim and twodim should not be both False.\n")
+		exit(1)
+	if (threedim and not twodim):
+		sys.stderr.write("ERROR (cmdargs.vectorvalues): twodim must be True if threedim is True.\n")
+		exit(1)
+	ranges = {}
+	components = ['', 'perp', 'x', 'y', 'z', 'phi', 'theta']
+	for comp in components:
+		try:
+			vrange = cmdargsrange.grid(args = prefix + comp, from_argv = sysargv)
+			if len(vrange) >= 1:
+				ranges[comp] = vrange
+		except:
+			continue
+		if prefix == 'b' and comp in ['', 'perp', 'x', 'y', 'z'] and len(vrange) >= 1 and magn_epsilon is not None and magn_epsilon != 0.0:
+			ranges[comp] = cmdargsrange.add_epsilon(vrange, magn_epsilon)
+
+	if len(ranges) == 0:
+		return None
+	# Angular unit: degrees by default
+	degrees = 'radians' not in sysargv
+
+	# Directional
+	dirval, dirarg = sysargv.getval(prefix + 'dir', 3)
+	direction = None
+	if dirval is not None:
+		if len(dirval) == 3:
+			try:
+				direction = [int(x) for x in dirval]
+			except:
+				pass
+		if direction is None and len(dirval) >= 1:
+			m = re.match(r"(-?[0-9])(-?[0-9])(-?[0-9])", dirval[0])
+			if m is not None:
+				direction = [int(x) for x in m.groups()]
+		if direction is None:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Invalid or missing argument following '%s'.\n" % dirarg)
+			exit(1)
+		if 'x' in ranges or 'y' in ranges or 'z' in ranges or 'perp' in ranges or 'phi' in ranges or 'theta' in ranges:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Argument %s may not be combined with other vector components other than %s itself.\n" % (dirarg, prefix))
+			exit(1)
+		dirvec = Vector(*direction, astype = 'xyz')
+		if dirvec.zero(1e-4):
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Singular direction vector.\n")
+			exit(1)
+		if not threedim and abs(dirvec.z()) > 1e-4:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Direction vector must have zero z component in 2D mode.\n")
+			exit(1)
+		if threedim:
+			_, dirtheta, dirphi = dirvec.spherical(deg = True, fold = True)
+			return VectorGrid('r', ranges[''], 'theta', dirtheta, 'phi', dirphi, prefix = prefix, astype = 'sph', deg = True)
+		elif abs(dirvec.z()) > 1e-4:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Direction vector must have zero z component in 1D or 2D mode.\n")
+			exit(1)
+		elif twodim:
+			_, dirphi = dirvec.polar(deg = True, fold = True)
+			return VectorGrid('r', ranges[''], 'phi', dirphi, prefix = prefix, astype = 'pol', deg = True)
+		elif abs(dirvec.y()) > 1e-4:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Direction vector must have zero y component in 1D.\n")
+			exit(1)
+		else:
+			return VectorGrid('r', ranges[''], prefix = prefix, astype = 'x')
+
+	# Valid combinations
+	if not twodim:  # implies not threedim
+		if 'perp' in ranges or 'y' in ranges or 'z' in ranges or 'phi' in ranges or 'theta' in ranges:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Components %sperp, %sy, %sz, %stheta, %sphi are not allowed in 1D mode.\n" % (prefix, prefix, prefix, prefix, prefix))
+			exit(1)
+		if 'x' in ranges and len(ranges) == 1:
+			return VectorGrid('x', ranges['x'], prefix = prefix, astype = 'x')
+		elif '' in ranges and len(ranges) == 1:
+			return VectorGrid('x', ranges[''], prefix = prefix, astype = 'x')
+		else:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): In 1D mode, either %s or %sx must specify value(s).\n")
+			exit(1)
+
+	if threedim and 'x' in ranges and 'y' in ranges and 'z' in ranges:
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sx, %sy, and %sz cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		return VectorGrid('x', ranges['x'], 'y', ranges['y'], 'z', ranges['z'], prefix = prefix, astype = 'xyz')
+	elif threedim and '' in ranges and 'phi' in ranges and 'z' in ranges:
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %s, %sphi, and %sz cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		return VectorGrid('r', ranges[''], 'phi', ranges['phi'], 'z', ranges['z'], prefix = prefix, astype = 'cyl', deg = degrees)
+	elif threedim and 'x' in ranges and 'phi' in ranges and len(ranges['phi']) == 1 and 'z' in ranges:
+		# r = x / cos phi
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sx, %sphi, and %sz cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		phi = ranges['phi'][0]
+		if degrees:
+			phi *= np.pi / 180
+		if abs(np.cos(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sx, %sphi = %s, %sz is singular.\n" % (prefix, prefix, ranges['phi'][0], prefix))
+			exit(1)
+		return VectorGrid('r', ranges['x'] / np.cos(phi), 'phi', ranges['phi'], 'z', ranges['z'], prefix = prefix, astype = 'cyl', deg = degrees)
+	elif threedim and 'y' in ranges and 'phi' in ranges and len(ranges['phi']) == 1 and 'z' in ranges:
+		# r = y / sin phi
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sy, %sphi, and %sz cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		phi = ranges['phi'][0]
+		if degrees:
+			phi *= np.pi / 180
+		if abs(np.sin(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sy, %sphi = %s, %sz is singular.\n" % (prefix, prefix, ranges['phi'][0], prefix))
+			exit(1)
+		return VectorGrid('r', ranges['y'] / np.sin(phi), 'phi', ranges['phi'], 'z', ranges['z'], prefix = prefix, astype = 'cyl', deg = degrees)
+
+	elif threedim and '' in ranges and 'theta' in ranges and 'phi' in ranges:
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %s, %stheta, and %sphi cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		return VectorGrid('r', ranges[''], 'theta', ranges['theta'], 'phi', ranges['phi'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'x' in ranges and 'theta' in ranges and len(ranges['theta']) == 1 and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = x / sin theta cos phi
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sx, %stheta, and %sphi cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		phi = ranges['phi'][0]
+		theta = ranges['theta'][0]
+		if degrees:
+			phi *= np.pi / 180
+			theta *= np.pi / 180
+		if abs(np.sin(theta)) < 1e-10 or abs(np.cos(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sx, %stheta = %s, %sphi = %s is singular.\n" % (prefix, prefix, ranges['theta'][0], prefix, ranges['phi'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['x'] / np.cos(phi) / np.sin(theta), 'theta', ranges['theta'], 'phi', ranges['phi'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'y' in ranges and 'theta' in ranges and len(ranges['theta']) == 1 and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = y / sin theta sin phi
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sy, %stheta, and %sphi cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		phi = ranges['phi'][0]
+		theta = ranges['theta'][0]
+		if degrees:
+			phi *= np.pi / 180
+			theta *= np.pi / 180
+		if abs(np.sin(theta)) < 1e-10 or abs(np.sin(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sy, %stheta = %s, %sphi = %s is singular.\n" % (prefix, prefix, ranges['theta'][0], prefix, ranges['phi'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['y'] / np.sin(phi) / np.sin(theta), 'theta', ranges['theta'], 'phi', ranges['phi'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'z' in ranges and 'theta' in ranges and len(ranges['theta']) == 1 and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = z / cos theta
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sz, %stheta, and %sphi cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		theta = ranges['theta'][0]
+		if degrees:
+			theta *= np.pi / 180
+		if abs(np.cos(theta)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sz, %stheta = %s, %sphi = %s is singular.\n" % (prefix, prefix, ranges['theta'][0], prefix, ranges['phi'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['z'] / np.cos(theta), 'theta', ranges['theta'], 'phi', ranges['phi'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'z' in ranges and len(ranges['z']) == 1 and 'theta' in ranges and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = z / cos theta
+		if len(ranges) > 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): %sz, %stheta, and %sphi cannot be combined with any other components.\n" % (prefix, prefix, prefix))
+			exit(1)
+		theta = np.array(ranges['theta'])
+		if degrees:
+			theta *= np.pi / 180
+		if np.amin(np.abs(np.cos(theta))) < 1e-10:
+			min_idx = np.argsort(np.abs(np.cos(theta)))[0]
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sz, %stheta = %s, %sphi = %s is singular.\n" % (prefix, prefix, ranges['theta'][min_idx], prefix, ranges['phi'][0]))
+			exit(1)
+		# convert to cylindrical coordinates
+		sys.stderr.write("Warning (cmdargs.vectorvalues): This combination of components %sz, %stheta, %sphi requires conversion to a nonuniform grid of cylindrical coordinates.\n" % (prefix, prefix, prefix))
+		return VectorGrid('r', ranges['z'][0] / np.cos(theta), 'z', ranges['z'], 'phi', ranges['phi'], prefix = prefix, astype = 'cyl', deg = degrees)
+
+
+	elif threedim and '' in ranges and 'theta' in ranges and len(ranges) == 2:
+		return VectorGrid('r', ranges[''], 'theta', ranges['theta'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'x' in ranges and 'theta' in ranges and len(ranges['theta']) == 1 and len(ranges) == 2:
+		# r = x / sin theta
+		theta = ranges['theta'][0]
+		if degrees:
+			theta *= np.pi / 180
+		if abs(np.sin(theta)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sx, %stheta = %s is singular.\n" % (prefix, prefix, ranges['theta'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['x'] / np.sin(theta), 'theta', ranges['theta'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'x' in ranges and len(ranges['x']) == 1 and 'theta' in ranges and len(ranges) == 2:
+		# r = x / sin theta
+		theta = np.array(ranges['theta'])
+		if degrees:
+			theta *= np.pi / 180
+		if np.amin(np.abs(np.tan(theta))) < 1e-10:
+			min_idx = np.argsort(np.abs(np.tan(theta)))[0]
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sx, %stheta = %s is singular.\n" % (prefix, prefix, ranges['theta'][min_idx]))
+			exit(1)
+		# convert to cylindrical coordinates
+		sys.stderr.write("Warning (cmdargs.vectorvalues): This combination of components %sx, %stheta requires conversion to a nonuniform grid of cylindrical coordinates.\n" % (prefix, prefix))
+		return VectorGrid('r', ranges['x'], 'z', ranges['x'][0] / np.tan(theta), prefix = prefix, astype = 'cyl', deg = degrees)
+	elif threedim and 'z' in ranges and 'theta' in ranges and len(ranges['theta']) == 1 and len(ranges) == 2:
+		# r = z / cos theta
+		theta = ranges['theta'][0]
+		if degrees:
+			theta *= np.pi / 180
+		if abs(np.cos(theta)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sz, %stheta = %s is singular.\n" % (prefix, prefix, ranges['theta'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['z'] / np.cos(theta), 'theta', ranges['theta'], prefix = prefix, astype = 'sph', deg = degrees)
+	elif threedim and 'z' in ranges and len(ranges['z']) == 1 and 'theta' in ranges and len(ranges) == 2:
+		# r = z / cos theta
+		theta = np.array(ranges['theta'])
+		if degrees:
+			theta *= np.pi / 180
+		if np.amin(np.abs(np.cos(theta))) < 1e-10:
+			min_idx = np.argsort(np.abs(np.cos(theta)))[0]
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sz, %stheta = %s is singular.\n" % (prefix, prefix, ranges['theta'][min_idx]))
+			exit(1)
+		# convert to cylindrical coordinates
+		sys.stderr.write("Warning (cmdargs.vectorvalues): This combination of components %sz, %stheta requires conversion to a nonuniform grid of cylindrical coordinates.\n" % (prefix, prefix))
+		return VectorGrid('r', ranges['z'][0] / np.cos(theta), 'z', ranges['z'], prefix = prefix, astype = 'cyl', deg = degrees)
+
+	elif threedim and 'x' in ranges and 'z' in ranges and len(ranges) == 2:
+		return VectorGrid('x', ranges['x'], 'z', ranges['z'], prefix = prefix, astype = 'xyz')
+	elif threedim and 'z' in ranges and len(ranges) == 1:
+		return VectorGrid('z', ranges['z'], prefix = prefix, astype = 'z')
+	## below here, we have either twodim or threedim
+	elif '' in ranges and 'phi' in ranges:
+		if 'perp' in ranges and len(ranges) == 3:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sperp, %sphi not (yet) implemented.\n" % (prefix, prefix))
+			exit(1)  # TODO: Implement suitable vector format; or maybe not ...
+		elif len(ranges) == 2:
+			return VectorGrid('r', ranges[''], 'phi', ranges['phi'], prefix = prefix, astype = 'pol', deg = degrees)
+		else:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %s, %sphi.\n" % (prefix, prefix))
+			exit(1)
+	elif 'x' in ranges and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = x / cos phi
+		phi = ranges['phi'][0]
+		if degrees:
+			phi *= np.pi / 180
+		if abs(np.cos(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sx, %sphi = %s is singular.\n" % (prefix, prefix, ranges['phi'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['x'] / np.cos(phi), 'phi', ranges['phi'], prefix = prefix, astype = 'pol', deg = degrees)
+	elif 'y' in ranges and 'phi' in ranges and len(ranges['phi']) == 1:
+		# r = y / sin phi
+		phi = ranges['phi'][0]
+		if degrees:
+			phi *= np.pi / 180
+		if abs(np.sin(phi)) < 1e-10:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Combination of components %sy, %sphi = %s is singular.\n" % (prefix, prefix, ranges['phi'][0]))
+			exit(1)
+		return VectorGrid('r', ranges['y'] / np.sin(phi), 'phi', ranges['phi'], prefix = prefix, astype = 'pol', deg = degrees)
+	elif 'x' in ranges and 'y' in ranges:
+		if len(ranges) > 2:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %sx, %sy.\n" % (prefix, prefix))
+			exit(1)
+		return VectorGrid('x', ranges['x'], 'y', ranges['y'], prefix = prefix, astype = 'xy')
+	elif '' in ranges and 'perp' in ranges:
+		if len(ranges) > 2:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %s, %sperp.\n" % (prefix, prefix))
+			exit(1)
+		if defaultaxis == 'x':
+			return VectorGrid('x', ranges[''], 'y', ranges['perp'], prefix = prefix, astype = 'xy')
+		elif defaultaxis == 'y':
+			return VectorGrid('y', ranges[''], 'x', ranges['perp'], prefix = prefix, astype = 'xy')
+		elif defaultaxis == 'z':
+			return VectorGrid('z', ranges[''], 'x', ranges['perp'], prefix = prefix, astype = 'xyz')
+	elif 'x' in ranges:
+		if len(ranges) > 1:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %sx.\n" % prefix)
+			exit(1)
+		return VectorGrid('x', ranges['x'], prefix = prefix, astype = 'x')
+	elif '' in ranges:
+		if len(ranges) > 1:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %s.\n" % prefix)
+			exit(1)
+		if threedim and defaultaxis == 'z':
+			return VectorGrid('z', ranges[''], prefix = prefix, astype = 'z')
+		elif defaultaxis in ['x', 'y']:
+			return VectorGrid(defaultaxis, ranges[''], prefix = prefix, astype = defaultaxis)
+		else:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal value for argument defaultaxis.\n")
+			exit(1)
+	elif 'y' in ranges:
+		if len(ranges) > 1:
+			sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components with %sy.\n" % prefix)
+			exit(1)
+		return VectorGrid('y', ranges['y'], prefix = prefix, astype = 'y')
+	# Fallthrough: Illegal combination
+	sys.stderr.write("ERROR (cmdargs.vectorvalues): Illegal combination of components %s (in this dimension).\n" % ", ".join(["'%s%s'" % (prefix, comp) for comp in ranges]))
+	exit(1)
+
def get_material(arg, temp = 0):
	"""Parse command-line arguments for materials.

	Arguments:
	arg     String or list of strings. Material type argument(s) to look for
	        in sysargv (e.g. mwell).
	temp    Float. Material temperature for evaluation of band parameters. Must
	        be >= 0.

	Returns:
	materials   List of evaluated material instances, one per material found on
	            the command line. May be empty if no matching argument is found.

	Raises:
	TypeError   If arg is neither a str nor a list.
	ValueError  If material parameters cannot be evaluated at temperature temp.
	"""
	if isinstance(arg, str):
		arg = [arg]  # normalize to a list of accepted argument names
	elif not isinstance(arg, list):
		raise TypeError("arg must be a str or list instance")
	if temp is None:
		temp = 0  # If left to be None, Material.evaluate may fail

	# Scan the command line for any of the accepted argument names (matched
	# case insensitively). After a match, keep consuming subsequent arguments
	# as long as they are material labels or numeric values; numeric values
	# are attached to the most recently seen material.
	materialargs = []
	argn = 1
	while argn < len(sysargv):
		if sysargv[argn].lower() in arg:
			sysargv.setparsed(argn)
			while argn < len(sysargv):
				if argn + 1 >= len(sysargv):
					break
				arg1 = sysargv[argn+1]
				val = from_pct(arg1)  # presumably parses '2%'-style or plain numbers, None otherwise -- TODO confirm
				if ismaterial(arg1) or arg1 in allMaterials:
					materialargs.append([arg1])  # start a new material entry
					sysargv.setparsed(argn + 1)
				elif val is not None and len(materialargs) > 0:
					materialargs[-1].append(val)  # composition value for the previous material
					sysargv.setparsed(argn + 1)
				else:
					break  # neither a material nor a number: stop consuming
				argn += 1
		argn += 1
	# Resolve each collected material and evaluate its parameters at temp
	materials = []
	for args in materialargs:
		mat = allMaterials.get_from_string(args[0], args[1:], "verbose" in sysargv)
		if mat is None:
			continue  # skip materials that could not be resolved
		try:
			mat = mat.evaluate(T = temp)
		except Exception as ex:
			raise ValueError(f"Unable to evaluate material parameters for {mat.name}") from ex
		if not mat.check_complete():
			sys.stderr.write(f"ERROR (cmdargs.material): Missing parameters for material {mat.name}.\n")
			exit(1)
		if not mat.check_numeric():
			sys.stderr.write(f"ERROR (cmdargs.material): Some parameters for material {mat.name} did not evaluate to a numerical value.\n")
			exit(1)
		materials.append(mat)
	return materials
+
def materialparam():
	"""Take material parameters from command line, either a file name or a string with parameter values.
	Multiple inputs are possible.

	The arguments 'matparam' and 'materialparam' are matched case
	insensitively. Each occurrence must be followed by a file name or a
	string of parameter values, which is forwarded to
	allMaterials.parse_cmdarg().

	No return value.
	"""
	# Quick exit if the argument is absent. Compare case insensitively, like
	# the loop below does; previously this guard was case sensitive, so
	# spellings such as 'MatParam' were silently ignored.
	if all(arg.lower() not in ["matparam", "materialparam"] for arg in sys.argv):
		return

	for argn, arg in enumerate(sys.argv):
		if arg.lower() in ["matparam", "materialparam"]:
			if argn + 1 >= len(sys.argv):
				sys.stderr.write(f"ERROR (initialize_config): Argument '{arg}' must be followed by a valid file name or configuration values.\n")
				exit(1)
			# Mark both the argument and its value as parsed
			sysargv.setparsed(argn)
			sysargv.setparsed(argn + 1)
			allMaterials.parse_cmdarg(sys.argv[argn + 1])
	return
+
def layersizes(arg):
	"""Helper function for layer sizes.

	Argument:
	arg   List of strings. Accepted command-line argument names.

	Returns:
	The result of sysargv.getfloats(); presumably a list of floats parsed
	after the matching argument, restricted to positive values -- confirm
	against sysargv.getfloats.
	"""
	return sysargv.getfloats(arg, positive = True)
+
def width_wres(arg):
	"""Parse command-line arguments for width (extent in y direction) and resolution.

	Argument:
	arg   List of strings. The slice of the command line starting at the
	      width argument itself; arg[0] is the argument name, arg[1] (and
	      possibly arg[2], arg[3]) hold its value.

	The value takes one of the forms 'NUMxRES' (number of points times
	resolution), 'TOTAL/RES' (total width over resolution; RES must contain a
	decimal point), 'TOTAL/NUM' (total width over an integer number of
	points), or 'TOTAL'. The operator and second operand may also be given as
	separate arguments, e.g. 'width 100 / 50'.

	Returns:
	w_num    Integer or None. Number of points.
	w_res    Float or None. Resolution (width per point).
	w_total  Float or None. Total width.
	narg     Integer. Number of arguments consumed after arg[0] (1 or 3).
	"""
	if len(arg) < 2:
		sys.stderr.write("ERROR: Absent value for argument \"%s\"\n" % arg[0])
		exit(1)

	w_str = arg[1]
	narg = 1
	# Join 'value op value' given as three separate arguments into one string
	if len(arg) >= 4 and (arg[2] in ["x", "X", "*", "/"]):
		w_str += arg[2] + arg[3]
		narg = 3

	# The decimal points in the patterns below are escaped. Previously they
	# were bare '.' and matched any character, so e.g. '100/50' was parsed as
	# TOTAL/RES (res = 50.0) instead of TOTAL/NUM (num = 50).
	# NUM x RES: number of points times resolution
	m = re.match(r"(\d+)\s*[xX\*]\s*(\d*\.?\d+)", w_str)
	if m is not None:
		w_num = int(m.group(1))
		w_res = float(m.group(2))
		w_total = w_num * w_res
		return w_num, w_res, w_total, narg
	# TOTAL / RES: the resolution must contain a decimal point
	m = re.match(r"(\d*\.?\d+)\s*/\s*(\d*\.\d+)", w_str)
	if m is not None:
		w_total = float(m.group(1))
		w_res = float(m.group(2))
		w_num = None
		return w_num, w_res, w_total, narg
	# TOTAL / NUM: integer number of points
	m = re.match(r"(\d*\.?\d+)\s*/\s*(\d+)", w_str)
	if m is not None:
		w_total = float(m.group(1))
		w_num = int(m.group(2))
		w_res = None
		return w_num, w_res, w_total, narg
	# TOTAL only
	m = re.match(r"(\d*\.?\d+)", w_str)
	if m is not None:
		w_total = float(m.group(1))
		w_num = None
		w_res = None
		return w_num, w_res, w_total, narg
	sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg[0])
	exit(1)
+
def orientation():
	"""Parse command-line arguments for lattice orientation.

	Returns:
	None if the argument is absent; otherwise a list whose entries are None
	(placeholder '-'), floats (angles), and/or direction triplets (3-tuples
	of ints).
	"""
	DEG = "\xb0"  # degree sign
	val, arg = sysargv.getval(['orientation', 'orient'], 3)
	if val is None:
		return None
	result = []
	for token in val:
		# Placeholder '-': leave this slot unspecified
		if token == "-":
			result.append(None)
			continue
		# Angle with explicit degree suffix ('d' or the degree sign)
		angle_match = re.match(r"(-?\d*\.?\d+)[d" + DEG + "]", token)
		if angle_match is not None:
			result.append(float(angle_match.group(1)))
			continue
		# Bare float (must contain a decimal point)
		if re.match(r"-?\d+\.\d*", token) is not None or re.match(r"-?\d*\.\d+", token) is not None:
			result.append(float(token))
			continue
		# Direction triplet: three single digits, or comma-separated integers
		triplet_match = re.match(r"(-?[0-9])(-?[0-9])(-?[0-9])", token)
		if triplet_match is None:
			triplet_match = re.match(r"([-+]?[0-9]+),?([-+]?[0-9]+),?([-+]?[0-9]+)", token)
		if triplet_match is not None:
			result.append(tuple(int(g) for g in triplet_match.groups()))
			continue
		# Unrecognized token: stop parsing here
		break
	if len(result) == 0:
		sys.stderr.write("ERROR: Invalid or missing value for argument \"%s\"\n" % arg)
		exit(1)
	sys.stderr.write("Warning: The argument \"%s\" activates a new experimental feature. Please double-check your results and report errors.\n" % arg)
	return result
+
+### MAIN ARGUMENT PARSERS ###
+## May be called from elsewhere
+
def params(kdim = None):
	"""Parse command-line arguments for physical parameters.

	Argument:
	kdim     1, 2, or 3. Dimensionality of the geometry, i.e., the number of
	         momentum components.

	Returns:
	PhysParams instance
	"""
	# Values that remain None are passed as None to PhysParams below
	zres = None
	linterface = None
	width = None
	yres = None
	temperature = None
	yconfinement = None
	strain_direction = None
	a_lattice = None
	norbitals = None
	# NOTE(review): strain_direction is never assigned in this function and
	# is always passed to PhysParams as None -- confirm whether intended.

	# get temperature first, as it is required for the evaluation of some material parameters
	try:
		val, arg = sysargv.getval(["temp"])
		if val is not None:
			temperature = float(val)
	except:
		sys.stderr.write("ERROR: Absent or invalid value for argument \"temp\"\n")
		exit(1)

	## Material parameters
	materialparam()

	## Layer materials
	m_layers = get_material(['mlayer', 'mater', 'material'], temperature)
	substrate_material = get_material(['msubstrate', 'msubst', 'msub', 'substrate'], temperature)
	m_well = get_material(['mwell', 'mqw'], temperature)
	m_barr = get_material(['mbarrier', 'mbarr', 'mbar'], temperature)

	## checks
	# Either generic layer specification (m_layers) or the well/barrier
	# shorthand may be used, not both; the shorthand is expanded into an
	# equivalent m_layers list (barrier-well-barrier).
	if len(m_layers) > 0:
		if len(m_well) != 0 or len(m_barr) != 0:
			sys.stderr.write("ERROR: Material specifications must either be generic (mlayer, mater, material) or specific (mwell, mqw, mbarrier, mbarr, mbar) but cannot be mixed.\n")
			exit(1)
	else:  # len(m_layer) == 0
		if kdim == 3 and len(m_well) == 1:
			sys.stderr.write("Warning: In bulk mode, \"well\" has no meaning, but using the argument to specify the material is accepted. Please use \"mater\" or \"mlayer\" instead.\n")
		elif kdim == 3:
			sys.stderr.write("ERROR: In bulk mode, \"well\" has no meaning. The correct material should be specified once. Please use \"mater\" or \"mlayer\" for this.\n")
			exit(1)
		elif len(m_well) != 1:
			sys.stderr.write("ERROR: Well material should be specified once.\n")
			exit(1)
		if kdim == 3 and len(m_barr) != 0:
			sys.stderr.write("ERROR: In bulk mode, the barrier material cannot be specified.\n")
			exit(1)
		if len(m_barr) == 0:
			m_layers = [m_well[0]]
		elif len(m_barr) == 1:
			m_layers = [m_barr[0], m_well[0], m_barr[0]]
		elif len(m_barr) == 2:
			m_layers = [m_barr[0], m_well[0], m_barr[1]]
		if len(m_barr) > 2:
			sys.stderr.write("ERROR: Maximally two barrier materials can be specified with \"mbarr\". For more layers, use \"mater\" or \"mlayer\".\n")
			exit(1)
	if len(substrate_material) == 0:
		substrate_material = None
	elif len(substrate_material) == 1:
		substrate_material = substrate_material[0]
	else:
		sys.stderr.write("ERROR: Only one substrate material can be specified.\n")
		exit(1)

	## Layer thicknesses
	if kdim != 3:
		l_layers = layersizes(['llayer', 'llayers', 'layer', 'layers', 'thickness', 'thicknesses', 'thick'])
		l_well = layersizes(['qw', 'lhgte', 'lqw', 'lwell'])
		l_barr = layersizes(['bar', 'barr', 'barrier', 'lhgcdte', 'lbar', 'lbarr', 'lbarrier'])
	else:
		l_layers, l_well, l_barr = [1.0], [], []  # default for 3 dimensions

	## Material and layer checks
	# Same generic-versus-specific logic as for the materials above
	if len(l_layers) > 0:
		if len(l_well) != 0 or len(l_barr) != 0:
			sys.stderr.write("ERROR: Layer thickness specifications must either be generic (llayer, etc.) or specific (lwell, lbarr, etc.) but cannot be mixed.\n")
			exit(1)
	else:  # len(l_layer) == 0
		if kdim == 3 and len(l_well) != 0:
			sys.stderr.write("Warning: In bulk mode, \"well\" has no meaning; the argument is ignored.\n")
		elif kdim != 3 and len(l_well) != 1:
			sys.stderr.write("ERROR: Well thickness must be specified once.\n")
			exit(1)
		if kdim == 3 and len(l_barr) != 0:
			sys.stderr.write("ERROR: In bulk mode, the barrier thickness cannot be specified.\n")
			exit(1)
		if kdim == 3:
			l_layers = [1.0]  # thick enough to avoid boundary effects
		elif len(l_barr) == 0:
			l_layers = [l_well[0]]
		elif len(l_barr) == 1:
			l_layers = [l_barr[0], l_well[0], l_barr[0]]
		elif len(l_barr) == 2:
			l_layers = [l_barr[0], l_well[0], l_barr[1]]
		if len(l_barr) > 2:
			sys.stderr.write("ERROR: Maximally two barrier thicknesses can be specified with \"lbarr\". For more layers, use \"llayer\".\n")
			exit(1)

	if len(m_layers) != len(l_layers):
		sys.stderr.write("ERROR: The number of specified materials and of specified thicknesses must match.\n")
		exit(1)

	## Layer types
	layer_types, _ = sysargv.getval(['ltype', 'ltypes', 'lstack'])

	## Layer densities
	layer_density = sysargv.getfloats(['ldens', 'layerdens', 'layerdensity'])

	if "verbose" in sysargv:
		# Print the layer stack top-down (hence reversed)
		print("Layer structure:")
		for m, l in zip(reversed(m_layers), reversed(l_layers)):
			print("%7.2f nm  %s" % (l, m.format("plain")))
		print("Substrate:", " ---" if substrate_material is None else substrate_material.format("plain"))

	## Lattice orientation
	lattice_orientation = orientation()

	# Strain
	rel_strain = strain()
	if ("ignorestrain" in sysargv) or ("nostrain" in sysargv):
		rel_strain = 'none'

	# Other parameters
	# Scan the remaining command line; most branches apply only in certain
	# dimensionalities (kdim conditions) and mark their arguments as parsed.
	for i in range(1, len(sysargv)):
		if sysargv[i].lower() in ['linterface', 'interface'] and kdim <= 2:
			linterface = sysargv.getfloat_after(i)
		elif sysargv[i].lower() in ['zres', 'lres'] and kdim <= 2:
			zres = sysargv.getfloat_after(i)
		elif sysargv[i].lower() in ['width', 'w'] and kdim <= 1:
			sysargv.setparsed(i)
			w_num, w_res1, w_total1, narg = width_wres(sysargv[i:])
			if w_res1 is not None:
				if yres is not None:
					sys.stderr.write("Warning: Duplicate value for wres (width resolution), argument \"%s\"\n" % sysargv[i])
				yres = w_res1
			if w_total1 is not None:
				if width is not None:
					sys.stderr.write("Warning: Duplicate value for w (width), argument \"%s\"\n" % sysargv[i])
				width = w_total1
			sysargv.setparsednext(narg)
		elif sysargv[i].lower() in ['wres', 'yres'] and kdim <= 1:
			w_res1 = sysargv.getfloat_after(i)
			if yres is not None:
				sys.stderr.write("Warning: Duplicate value for wres (width resolution), argument \"%s\"\n" % sysargv[i])
			yres = w_res1
		elif sysargv[i].lower() in ['alattice', 'alatt', 'latticeconst']:
			a_lattice = sysargv.getfloat_after(i)
		elif sysargv[i].lower() in ['stripdir', 'ribbondir']:
			sdir = sysargv.getval_after(i).lower()
			if lattice_orientation is not None:
				sys.stderr.write("Warning: Duplicate definition for strip direction or orientation (argument \"%s\")\n" % sysargv[i])
			# Strip direction: either a digit triplet or a named axis, which
			# is mapped to an angle in degrees
			m = re.match("(-?[0-9])(-?[0-9])(-?[0-9])", sdir)
			if m is not None:
				lattice_orientation = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
			elif sdir == "x":
				lattice_orientation = 0
			elif sdir == "y":
				lattice_orientation = 90
			elif sdir == "xy":
				lattice_orientation = 45
			elif sdir == "-xy":
				lattice_orientation = -45
			else:
				sys.stderr.write("ERROR: Absent or invalid value for argument \"%s\". Valid arguments are x, y, xy, -xy, or ab0 where a and b are integers from -9 to 9. For angular values, use argument 'stripangle'.\n" % sysargv[i])
				exit(1)
		elif sysargv[i].lower() in ['stripangle', 'ribbonangle']:
			sangle = sysargv.getfloat_after(i)
			if lattice_orientation is not None:
				sys.stderr.write("Warning: Duplicate definition for strip direction (argument \"%s\")\n" % sysargv[i])
			lattice_orientation = sangle
		elif sysargv[i].lower() in ['yconf', 'confinement', 'yconfinement']:
			yconfinement = sysargv.getfloat_after(i)
		elif sysargv[i].lower() in ['mn', 'ymn']:
			sys.stderr.write("ERROR: Deprecated argument \"%s\". Enter the material as 'HgMnTe 0.02' or 'HgMnTe 2%%' (substitute the desired Mn concentration).\n" % sysargv[i])
			exit(1)
		elif sysargv[i].lower() in ['cd', 'ycd']:
			sys.stderr.write("ERROR: Deprecated argument \"%s\". Enter the material as 'HgCdTe 0.68' or 'HgCdTe 68%%' (substitute the desired Cd concentration).\n" % sysargv[i])
			exit(1)
		elif sysargv[i].lower() in ['eightband', '8band', '8o', '8orb', '8orbital']:
			sysargv.setparsed(i)
			if norbitals is not None:
				sys.stderr.write("ERROR: Conflicting or double argument \"%s\" for number of orbitals\n" % sysargv[i])
				exit(1)
			else:
				norbitals = 8
		elif sysargv[i].lower() in ['sixband', '6band', '6o', '6orb', '6orbital']:
			sysargv.setparsed(i)
			if norbitals is not None:
				sys.stderr.write("ERROR: Conflicting or double argument \"%s\" for number of orbitals\n" % sysargv[i])
				exit(1)
			else:
				norbitals = 6
		elif sysargv[i].lower() in ['orbitals', 'orb', 'norb']:
			sysargv.setparsed(i)
			if norbitals is not None:
				sys.stderr.write("ERROR: Conflicting or double argument \"%s\" for number of orbitals\n" % sysargv[i])
				exit(1)
			try:
				norbitals = int(sysargv[i+1])
			except:
				sys.stderr.write("ERROR: Absent or invalid value for argument \"%s\"\n" % sysargv[i])
				exit(1)
			if norbitals not in [6, 8]:
				sys.stderr.write("ERROR: Number of orbitals must be 6 or 8 (argument \"%s\")\n" % sysargv[i])
				exit(1)
			else:
				sysargv.setparsednext(1)

	## Check that norbitals has been set correctly
	if norbitals is None:
		sys.stderr.write("ERROR: Number of orbitals must be specified. Use 'norb 6', '6o', 'norb 8', or '8o', for example.\n")
		exit(1)

	# Magnetic field; vectorvalues returns a grid, of which only the first
	# entry is used here -- presumably a single vector is expected; confirm.
	magn = vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
	if magn is not None:
		magn = magn[0] if len(magn) >= 1 else None

	# Renormalization of material definitions is on unless disabled explicitly
	matdef_renorm = not (("noren" in sysargv) or ("norenorm" in sysargv) or ("norenormalization" in sysargv) or ("norenormalisation" in sysargv))

	return PhysParams(
		kdim = kdim, l_layers = l_layers, m_layers = m_layers, layer_types = layer_types,
		layer_density = layer_density, zres = zres, linterface = linterface,
		width = width, yres = yres, substrate_material = substrate_material,
		magn = magn, temperature = temperature, yconfinement = yconfinement,
		strain_direction = strain_direction, a_lattice = a_lattice,
		rel_strain = rel_strain, lattice_orientation = lattice_orientation,
		norbitals = norbitals, matdef_renorm = matdef_renorm)
+
def plot_options(plotopts = None, format_args = None):
	"""Parse command-line arguments for plot options

	Arguments:
	plotopts     A dict instance or None. If this is a non-empty dict, use this
	             as the initial values which are then updated by this function.
	format_args  A tuple of objects that are either dict or define .to_dict(),
	             or None. Arguments for variable substitution in
	             format_string(). Typically this will be (physparams, opts).

	Returns:
	A dict instance.
	"""
	## Default values (plotopts and format_args)
	if plotopts is None:
		plotopts = {}
	elif not isinstance(plotopts, dict):
		raise TypeError("Argument plotopts must be a dict instance or None")
	if format_args is None:
		format_args = ()
	elif not isinstance(format_args, tuple):
		# Fixed error message: a tuple is expected here, not a dict
		raise TypeError("Argument format_args must be a tuple instance or None")

	plotopts['legend'] = 'legend' in sysargv
	plotopts['labels'] = 'char' in sysargv or 'labels' in sysargv or 'plotlabels' in sysargv

	plotopts['mode'] = None
	val, arg = sysargv.getval(['plotmode', 'plotstyle'])
	if val is not None:
		if val in ['auto', 'automatic', 'normal', 'join', 'curves', 'horizontal', 'spin'] or re.fullmatch(r'(spin|berry)(xy|xz|yz)1?', val) is not None:
			plotopts['mode'] = val
		else:
			sys.stderr.write("Warning: '%s' is not a valid plot style (argument '%s'); plot style is set to automatic.\n" % (val, arg))

	plotopts['xrange'] = None
	val, arg = sysargv.getval(['xrange', 'krange', 'brange'], 2)
	if isinstance(val, list) and len(val) in [1, 2, 3]:
		try:
			x1 = float(val[0])
		except (TypeError, ValueError):
			sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
			exit(1)
		try:
			x2 = float(val[1])
		except (IndexError, TypeError, ValueError):
			# Second bound is optional; default to 0.0
			x2 = 0.0
		plotopts['xrange'] = [min(x1, x2), max(x1, x2)]

	elif val is not None:
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)

	plotopts['obs'] = None
	val, arg = sysargv.getval(['obs'])
	if val is not None:
		plotopts['obs'] = val

	plotopts['plotvar'] = None
	val, arg = sysargv.getval(['plotvar'])
	if val is not None:
		if val.startswith("k") or val.startswith("b"):
			plotopts['plotvar'] = val
		else:
			sys.stderr.write("Warning: '%s' is not a valid plot variable (argument '%s')\n" % (val, arg))

	plotopts['title_pos'] = None
	val, arg = sysargv.getval(['titlepos', 'titleposition', 'plottitleposition', 'plottitlepos'])
	if val is not None:
		# Normalize separators, e.g. 'top-left' -> 'topleft'
		plotopts['title_pos'] = val.replace('-', '').replace('_', '').replace(' ', '')

	plotopts['title'] = None
	val, arg = sysargv.getval(['title', 'plottitle'])
	if val is not None:
		# Bug fix: this previously tested 'params is not None', which refers
		# to the params() function above and is therefore always true. The
		# intent (see the warning message) is to check whether formatting
		# arguments (system parameters) are available.
		if format_args:
			plotopts['title'] = format_string(val, plotopts, *format_args, material_format = 'tex')
		else:
			sys.stderr.write("Warning: Could not format plot title; system parameters are missing\n")

	# Obsolete: plotopts['density_unit'] = None
	for a in ['densitycm', 'densityecm', 'densitypcm', 'densitynm', 'densityenm', 'densitypnm', 'densityunit', 'densunit', 'dunit']:
		if a in sysargv:
			sys.stderr.write("Warning: Density unit argument '%s' is deprecated. Use the configuration values 'dos_quantity' and 'dos_unit' instead.\n" % a)

	val, arg = sysargv.getval(['rcfile', 'plotrc'])
	if val is not None:
		sys.stderr.write(f"Warning: Deprecated argument {arg}. Plot customization is now done by setting the configuration value 'fig_matplotlib_style'.\n")

	plotopts['density_range'] = None
	val, arg = sysargv.getval(['dosrange', 'densityrange', 'dosmax', 'densitymax'], 2)
	if isinstance(val, list) and len(val) in [1, 2]:
		try:
			x1 = float(val[0])
		except (TypeError, ValueError):
			sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
			exit(1)
		try:
			x2 = float(val[1])
		except (IndexError, TypeError, ValueError):
			x2 = None
		# A single value is interpreted as the upper bound
		plotopts['density_range'] = [None, x1] if x2 is None else [min(x1, x2), max(x1, x2)]
	elif val is not None:
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)

	plotopts['obsrange'] = None
	val, arg = sysargv.getval(['orange', 'obsrange', 'colorrange', 'colourrange'], 2)
	if isinstance(val, list) and len(val) in [1, 2]:
		try:
			x1 = float(val[0])
		except (TypeError, ValueError):
			sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
			exit(1)
		try:
			x2 = float(val[1])
		except (IndexError, TypeError, ValueError):
			x2 = None
		# A single value is interpreted as the upper bound
		plotopts['obsrange'] = [None, x1] if x2 is None else [min(x1, x2), max(x1, x2)]
	elif val is not None:
		sys.stderr.write("ERROR: Invalid value for argument \"%s\"\n" % arg)
		exit(1)

	return plotopts
+
+def options(opts = None, axial_automatic = None):
+	"""Parse command-line arguments for generic options (model options)
+
+	Argument:
+	opts    A dict instance or None. If this is a non-empty	dict, use this as
+	        the initial values which are then updated by this function.
+	axial_automatic  None, 'ignore', True or False. If True or False, use this
+	                 value if neither 'axial' nor 'nonaxial' are given on the
+	                 command line. If None, raise an error in that case. If
+	                 'ignore', pass on silently.
+
+	Returns:
+	A dict instance.
+	"""
+	## Default value (opts)
+	if opts is None:
+		opts = {}
+	elif not isinstance(opts, dict):
+		raise TypeError("Argument opts must be a dict instance or None")
+
+	## Set number of processes for multiprocessing / parallel computation
+	num_cpus, max_cpus = cpus()
+	if num_cpus > 1:
+		opts['cpu'] = num_cpus
+
+	## Ignore strain terms
+	if ("ignorestrain" in sysargv) or ("nostrain" in sysargv):
+		opts["ignorestrain"] = True
+
+	## Ignore exchange (obsolete argument)
+	if "ignoreexchange" in sysargv:
+		sys.stderr.write("ERROR: Argument 'ignoreexchange' is no longer supported. In order to disable exchange coupling, adjust the relevant material parameters.\n")
+		exit(1)
+
+	# Forced disable/enable lattice regularization (obsolete)
+	if "nolatticereg" in sysargv or "latticereg" in sysargv:
+		sys.stderr.write("ERROR: Lattice regularization is now controlled by configuration value 'lattice_regularization'.\n")
+		exit(1)
+
+	## Exclude non-axial terms
+	if ("noax" in sysargv) or ("noaxial" in sysargv) or ("nonaxial" in sysargv):
+		opts['axial'] = False
+	elif ("ax" in sysargv) or ("axial" in sysargv):
+		opts['axial'] = True
+	elif isinstance(axial_automatic, str) and axial_automatic.lower() == 'ignore':
+		pass
+	elif axial_automatic is None:
+		sys.stderr.write("ERROR: Either 'ax' ('axial') or 'noax' ('nonaxial') is required as an argument.\n")
+		exit(1)
+	else:
+		sys.stderr.write("Warning: Axial approximation has been set to %s automatically.\n" % axial_automatic)
+		opts['axial'] = axial_automatic
+
+	## Do not renormalize material parameters
+	# (just set the option; param will actually take care of it)
+	if ("noren" in sysargv) or ("norenorm" in sysargv) or ("norenormalization" in sysargv) or ("norenormalisation" in sysargv):
+		opts['renorm'] = False
+	else:
+		opts['renorm'] = True
+
+	## Include BIA terms
+	if "bia" in sysargv:
+		opts["bia"] = True
+
+	## Ignore in-plane orbital field
+	if ("ignoremagnxy" in sysargv) or ("ignorebxy" in sysargv) or ("ignoreorbxy" in sysargv):
+		opts["ignore_magnxy"] = True
+
+	## Dimensionful(l) observables
+	if 'dimful' in sysargv or 'dimfull' in sysargv:
+		opts['dimful_obs'] = True
+
+	## Orbital overlap observables
+	if 'orboverlaps' in sysargv or 'orbobs' in sysargv or 'orbitaloverlaps' in sysargv or 'orbitalobs' in sysargv:
+		opts['orbitalobs'] = True
+
+	## Number of eigenvalues and states
+	neig = sysargv.getint(["neig", "neigs"], 50, limit = [1, None])
+	opts['neig'] = neig
+
+	## Number of Landau levels
+	ll_max = sysargv.getint(["llmax", "nll"], 30, limit = [0, None])
+	opts['ll_max'] = ll_max
+
+	## Landau level (density / Berry curvature) broadening
+	broadening_val, broadening_val2 = broadening()
+	berrybroadening_val, _ = broadening(['berrybroadening', 'hallbroadening', 'chernbroadening'], allow_extra_val = False)
+	if len(broadening_val) >= 1:
+		broadening_setopts(opts, 'broadening', broadening_val)
+		if opts['broadening_type'].count('thermal') + opts['broadening_type'].count('fermi') > 1:
+			sys.stderr.write("Warning (Main): More than one thermal/fermi broadening given.\n")
+	elif 'hall' in sysargv:
+		broadening_setopts(opts, 'broadening', (0.5, 'gauss', 'auto'))
+		sys.stderr.write("Warning (Main): Argument 'hall' implies broadening_scale = %g meV. Use explicit 'broadening' argument to override this value.\n" % opts['broadening_scale'])
+
+	if broadening_val2 is not None:
+		if len(berrybroadening_val) > 0:
+			sys.stderr.write("ERROR (Main): Numerical extra argument to 'broadening' cannot be combined with argument 'berrybroadening'.\n")
+			exit(1)
+		broadening_setopts(opts, 'berrybroadening', broadening_val2)
+	elif len(berrybroadening_val) >= 1:
+		broadening_setopts(opts, 'berrybroadening', berrybroadening_val)
+	elif 'hall' in sysargv:
+		if opts['broadening_type'] == 'gauss':
+			opts['berrybroadening_scale'] = 0.1 * opts['broadening_scale']
+		elif isinstance(opts['broadening_type'], list):
+			n_gauss = opts['broadening_type'].count('gauss')
+			if 'gauss' in opts['broadening_type']:
+				i_gauss = opts['broadening_type'].index('gauss')
+				opts['berrybroadening_scale'] = 0.1 * opts['broadening_scale'][i_gauss]
+				if opts['broadening_type'].count('gauss') > 1:
+					sys.stderr.write("Warning (Main): For argument 'hall', extract Berry broadening width from first Gaussian broadening parameter only.\n")
+			else:
+				opts['berrybroadening_scale'] = 0.05  # default value
+		opts['berrybroadening_type'] = 'gauss'
+		opts['berrybroadening_dep'] = 'auto'
+		sys.stderr.write("Warning (Main): Argument 'hall' implies berrybroadening_scale = %g meV. Use explicit 'broadening' argument (with two values) to override this value.\n" % opts['berrybroadening_scale'])
+
+	## Temperature broadening (dostemp)
+	temp_broadening = sysargv.getfloat(["dostemp", "tbroadening", "tempbroadening"])
+	if temp_broadening is not None and temp_broadening < 0.0:
+		sys.stderr.write("ERROR: Broadening temperature may not be negative.\n")
+		exit(1)
+	if temp_broadening is not None and temp_broadening >= 0.0:
+		if 'broadening_type' not in opts:
+			opts['broadening_type'] = 'thermal'
+			opts['broadening_scale'] = temp_broadening
+			opts['broadening_dep'] = 'const'
+		elif isinstance(opts['broadening_type'], (str, list)):
+			if isinstance(opts['broadening_type'], str):
+				opts['broadening_type'] = [opts['broadening_type']]
+				opts['broadening_scale'] = [opts['broadening_scale']]
+				opts['broadening_dep'] = [opts['broadening_dep']]
+			if any([x in ['thermal', 'fermi'] for x in opts['broadening_type']]):
+				sys.stderr.write("ERROR: Arguments 'dostemp' and 'broadening thermal/fermi' cannot be combined.\n")
+				exit(1)
+			opts['broadening_type'].append('thermal')
+			opts['broadening_scale'].append(temp_broadening)
+			opts['broadening_dep'].append('const')
+		else:
+			raise TypeError("Type of opts['broadening_type'] should be str or list")
+		opts['tempbroadening'] = temp_broadening
+
+	## Target energy
+	targetenergy = sysargv.getfloats(["targetenergy", "e0"])
+	if len(targetenergy) == 0:
+		opts['targetenergy'] = 0.0  # default value
+	elif len(targetenergy) == 1:
+		opts['targetenergy'] = targetenergy[0]
+	else:
+		opts['targetenergy'] = targetenergy
+	# TODO: Some solvers might accept only a single value, not a list
+
+	## Artificial split
+	splitwarningvalue = 0.1
+	split = sysargv.getfloat(["split"], 0.0)
+	if split != 0.0:
+		opts['split'] = split
+	if abs(split) > splitwarningvalue:
+		sys.stderr.write("Warning (Main): Artificial energy splitting is large. It is advisable to keep it under %f (in meV).\n" % splitwarningvalue)
+	val, arg = sysargv.getval(['splittype'])
+	if val is not None:
+		opts['splittype'] = val
+
+	## Artificial shift of E1 bands (no longer supported)
+	if 'e1shift' in sysargv:
+		sys.stderr.write("Warning (Main): The argument 'e1shift' is no longer supported.\n")
+
+	## Energy shift and automatic shift to zero energy
+	eshift = sysargv.getfloat(["eshift", "energyshift"])
+	if eshift is not None:
+		opts['eshift'] = eshift
+	else:
+		opts['eshift'] = 0.0
+	opts['zeroenergy'] = ('zeroenergy' in sysargv)
+
+	## Get transitions
+	transitions_arg = transitions()
+	if isinstance(transitions_arg, list) and len(transitions_arg) == 2:
+		opts['transitions'] = transitions_arg[1]
+		opts['transitionsrange'] = transitions_arg[0]
+	else:
+		opts['transitions'] = bool(transitions_arg)
+		opts['transitionsrange'] = None
+
+	## BHZ anchor point
+	k0_bhz = sysargv.getfloat(["kbhz", "bhzk", "bhzat"], 0.0)
+	if k0_bhz != 0.0:
+		opts['k0_bhz'] = k0_bhz
+
+	## Zero of gauge potential
+	gauge_zero = sysargv.getfloat(["gaugezero", "gauge0"], 0.0)
+	if gauge_zero != 0.0:
+		opts['gauge_zero'] = gauge_zero
+	if abs(gauge_zero) > 1.0:
+		sys.stderr.write("Warning (Main): Zero of gauge potential lies outside the sample.\n")
+
+	## Periodic in y?
+	if "periodicy" in sysargv:
+		opts['periodicy'] = True
+
+	## Gate potential (+Vg/2 at top, -Vg/2 at bottom edge)
+	v_inner = sysargv.getfloat(["vinner", "vwell"], None)
+	v_outer = sysargv.getfloat(["vouter", "vtotal"], None)
+	vgate = sysargv.getfloat(["vgate", "vtb", "vg"], None)
+	if v_inner is not None:
+		if vgate is not None:
+			sys.stderr.write("ERROR (Main): Potential options (vgate, v_inner, v_outer) cannot be combined.\n")
+			exit(1)
+		opts['v_inner'] = v_inner
+	if v_outer is not None:
+		if vgate is not None or v_inner is not None:
+			sys.stderr.write("ERROR (Main): Potential options (vgate, v_inner, v_outer) cannot be combined.\n")
+			exit(1)
+		opts['v_outer'] = v_outer
+	if vgate is not None:
+		sys.stderr.write("Warning (Main): Potential option vgate (vtb, vg) has been replaced by options vtotal (alias v_outer) and vwell (alias v_inner). Use vtotal (v_outer) to get the same functionality as vgate formerly.\n")
+		opts['v_outer'] = vgate
+
+	## Surface potential at the interface
+	vsurf_v, vsurf_l, vsurf_quadratic = vsurf()
+	if vsurf_v != 0.0:
+		opts['vsurf'] = vsurf_v
+		opts['vsurf_l'] = vsurf_l
+		opts['vsurf_quadratic'] = vsurf_quadratic
+
+	## Potential files
+	val = potential(["pot", "potential"])
+	if val is not None and len(val) > 0:
+		opts['potentialfile'] = val
+	val = potential(["poty", "potentialy"])
+	if val is not None and len(val) > 0:
+		opts['ypotentialfile'] = val
+
+	## Selfconsistent calculation options
+	selfcon_max_it, selfcon_acc = selfcon()
+	if selfcon_max_it is not None:
+		opts['selfcon_max_iterations'] = selfcon_max_it
+	if selfcon_acc is not None:
+		opts['selfcon_accuracy'] = selfcon_acc
+	val, arg = sysargv.getval(['selfconweight', 'scweight', 'scw'])
+	if val is not None:
+		val = from_pct(val)
+		if val is not None:
+			opts['selfcon_weight'] = val
+
+	## Boundary conditions for solving Poisson equation
+	bc = potential_bc()
+	if bc is not None:
+		opts['custom_bc'] = bc
+
+	## Depletion layer charge and width
+	ndepletion, ldepletion = depletion()
+	if ndepletion is not None:
+		opts['n_depletion'] = ndepletion
+	if ldepletion is not None:
+		opts['l_depletion'] = ldepletion
+	carrierdensity = cmdargsrange.grid(args = ["carrdens", "cardens", "carrierdensity", "ncarr", "ncar", "ncarrier"], from_argv = sysargv)
+	if isinstance(carrierdensity, (np.ndarray, list)) and len(carrierdensity) >= 1:
+		opts['cardensrange'] = carrierdensity
+		# TODO: Single number is required for many functions. Distinction between
+		# cardensrange and cardens may disappear when all functions are reimplemented
+		# for density ranges, or raise an appropriate warning.
+		opts['cardens'] = carrierdensity[0]
+		if len(carrierdensity) > 1:
+			sys.stderr.write("Warning (cmdargs.options): Density ranges are supported only for a few functions. Most functions will take just the first density value.\n")
+	efield_arg = efield()
+	if efield_arg is not None:
+		opts['efield'] = efield_arg
+	e_cnp = sysargv.getfloat(["ecnp", "cnp", "efermi", "ef0"])
+	if e_cnp is not None:
+		opts['e_cnp'] = e_cnp
+	n_cnp = sysargv.getfloat(["noffset", "densoffset", "ncnp"])
+	if n_cnp is not None:
+		opts['n_offset'] = n_cnp
+	else:
+		n_cnp = sysargv.getfloat(["idosoffset", "dosoffset"])
+		if n_cnp is not None:
+			opts['n_offset'] = n_cnp / (2 * np.pi)**2
+	n_bg = sysargv.getfloat(["cardensbg"])
+	if n_bg is not None:
+		opts['n_bg'] = n_bg
+	opts['return_eivec'] = 'keepeivecs' in sysargv
+	opts['custom_interface_length'] = sysargv.getint(["custominterfacelengthnm"], limit = (0, np.inf))
+	return opts
+
+def outputid(format_args = None):
+	"""Parse command-line arguments for outputid (string inserted into file names)"""
+	outputid = ""
+	outputid, arg = sysargv.getval(["out", "outputid", "outputname", "outid", "outfile"])
+	if outputid is None or outputid == "":
+		outputid = ""
+	if isinstance(format_args, tuple) and (outputid == '?' or '{' in outputid):
+		outputid = format_string(outputid, *format_args)
+		if outputid is None:
+			exit(1)
+	return outputid
+
+def outdir(allow_mkdir = True, do_chdir = True, replacements = None):
+	"""Parse command-line arguments for output directory (and go there)
+
+	Arguments:
+	allow_mkdir   True or False. If True, create the directory if it does not
+	              exist yet. If False, do not create the directory
+	do_chdir      True or False. If True, change to new output directory. If
+	              False, do not do so.
+	replacements  A dict instance. The key value pairs indicate string
+	              substitution key -> value. This only applies to a directory
+	              that contains '@'.
+
+	Returns:
+	curdir   Previous working directory
+	outdir   New output directory
+	"""
+	curdir = os.getcwd()
+
+	d, arg = sysargv.getval(["dir", "outputdir", "outdir"])
+	outdir = None
+	if d is not None:
+		if isinstance(replacements, dict) and '@' in d:  # Handle directory name containing @var substitutions
+			for from_, to in replacements.items():
+				d = d.replace(from_, to)
+		if os.path.exists(d):
+			outdir = d
+		else:
+			if allow_mkdir:
+				try:
+					os.makedirs(d)
+				except OSError as e:
+					sys.stderr.write("Warning (cmdargs.outdir): Directory \'%s\' could not be created / %s\n" % (d, e))
+				except:
+					sys.stderr.write("Warning (cmdargs.outdir): Directory \'%s\' could not be created\n" % d)
+				else:
+					print("Output directory (created):", d)
+					outdir = d
+			else:
+				sys.stderr.write("Warning (cmdargs.outdir): Directory \'%s\' does not exist\n" % d)
+	if outdir is None:
+		if os.path.exists("data"):
+			outdir = "data"
+		else:
+			outdir = "."
+		print("Output directory:", outdir)
+
+	if do_chdir:
+		try:
+			os.chdir(outdir)
+		except:
+			sys.stderr.write("ERROR (cmdargs.outdir): Output directory not accessible.\n")
+			exit(1)
+	return curdir, os.path.normpath(os.path.join(curdir, outdir))
+
+def resume_from():
+	"""Parse command-line arguments for loading pickled DiagDataPoints
+	generated by tempout argument and resume process from there on.
+
+	Returns:
+	load_dir        Directory to load DiagDataPoint temporary files from.
+	resume_step     Step from which to resume progress from (manual overwrite).
+	Both are None if the 'resume' argument is absent or no directory was given.
+	"""
+
+	# 'resume' takes up to two values (directory and/or step index), in any order
+	values, arg = sysargv.getval('resume', n = 2, mark = None)
+	sysargv.setparsed('resume')
+	if values is None:
+		return None, None
+	resume_step = None
+	load_dir = None
+	# load_dir and resume_step may be specified in arbitrary order:
+	for v in values:
+		try:
+			# For both sysargs following the 'resume' argument
+			# test if it is an integer specifying the step index.
+			resume_step = int(v)
+		except ValueError:
+			# If it's not an integer, it is either the directory value or the next argument
+			if load_dir is None:
+				# If we have not parsed a directory yet, do it now.
+				load_dir = v
+			else:
+				# otherwise no step was given (single value for the resume argument).
+				# Stop parsing more values (limited to 2 anyway).
+				break
+		sysargv.setparsednext(1)
+	if load_dir is None:
+		return None, None
+	elif load_dir == 'last':
+		# Special value 'last': pick the most recent matching tempout folder
+		# in the current directory, by the timestamp suffix in its name.
+		tmpdirname = 'temp%s_' % outputid()
+		alltempout = [tempdir for tempdir in os.listdir() if tempdir.find(tmpdirname) != -1]
+		if len(alltempout) == 0:
+			sys.stderr.write("WARNING (cmdargs.resume_from): Resume from 'last' run was requested, but no tempout "
+			                 "folder was matched (should start with %s).\n" % os.path.join(os.getcwd(), tmpdirname))
+			return None, None
+		else:
+			# Sort by the ISO-like timestamp after the last '_' and take the newest
+			load_dir = sorted(alltempout, key = lambda t: datetime.strptime(t.split('_')[-1], '%Y-%m-%dT%H-%M-%S'))[-1]
+			return load_dir, resume_step
+	elif os.path.exists(load_dir):
+		return load_dir, resume_step
+	else:
+		raise ValueError("Invalid path to temporary files for resume option: %s" % load_dir)
+
+def plotwf(onedim = False, twodim = True):
+	"""Parse command-line arguments for wave function plots (style and locations)
+
+	Arguments:
+	onedim, twodim   True or False. Whether wave-function options specific to 1
+	                 and/or 2 dimensions are accepted. May not both be False.
+	                 Default: False, True. (For LL mode, use default.)
+
+	Returns:
+	style       String. The plot style.
+	locations   List. Indicates where to evaluate the wave functions.
+
+	Note:
+	If the command-line argument is absent, return False, None.
+	"""
+	if "plotwf" not in sysargv:
+		return False, None
+	if (not onedim) and (not twodim):
+		sys.stderr.write("ERROR (cmdargs.plotwf): Arguments onedim and twodim cannot be False at the same time.\n")
+	argn = sysargv.index("plotwf")
+	sysargv.setparsed(argn)
+	if argn + 1 >= len(sysargv):
+		# 'plotwf' is the last argument: use defaults
+		return "default", [0]
+	style = None
+	locations = []
+	# Scan the arguments following 'plotwf' until one does not match
+	for arg in sysargv[argn + 1:]:
+		if (arg in ['separate', 'together'] and twodim) or (arg in ['z', '1d', 'y', 'byband', 'by_band', 'color', 'colour', 'zy', 'yz'] and onedim) or arg == 'default':
+			if style is not None:
+				sys.stderr.write("Warning (cmdargs.plotwf): Got multiple styles for plotting wave functions.\n")
+			elif arg == "by_band":  # normalize aliases to canonical style names
+				style = "byband"
+			elif arg == "colour":
+				style = "color"
+			elif arg == "yz":
+				style = "zy"
+			else:
+				style = arg
+		elif arg in ['zero', '0']:
+			locations.append('zero')
+		elif arg in ['min', 'max', 'mid', 'all']:
+			locations.append(arg)
+		elif arg == 'minmax':  # shorthands expanding to multiple locations
+			locations.extend(['min', 'max'])
+		elif arg in ['3', 'three', 'halves']:
+			locations.extend(['min', 'mid', 'max'])
+		elif arg in ['5', 'five', 'quarters']:
+			locations.extend(['min', '1/4', 'mid', '3/4', 'max'])
+		else:
+			try:
+				argval = float(arg)  # If it is a number, treat it as such
+			except:
+				break  # No match: Stop looking further
+			else:
+				locations.append(argval)
+		sysargv.setparsednext(1)
+	if style is None:
+		style = "default"
+	if locations == []:
+		locations = ['zero']
+	# Deduplicate locations; set() makes the order unspecified — presumably
+	# the order does not matter downstream (TODO confirm).
+	return style, list(set(locations))
+
+def bhz():
+	"""Parse command-line arguments for BHZ approximation
+
+	Returns:
+	[nl, bands_a, nu]   Here, nl and nu are (even) integers that indicate the
+	                    number of bands in the lower and upper B sector. They
+	                    can also be None, indicating that the maximum number of
+	                    bands should be taken. The middle element bands_a can
+	                    be any of:
+	                    (nal, nau)     Tuple of two integers indicating number
+	                                   of bands in A sector above and below the
+	                                   gap. Input is a pair of integers without
+	                                   signs.
+	                    [s1, s2, ...]  List of strings with the band labels of
+	                                   bands bands in A sector. Input, e.g.,
+	                                   'E1 H1'.
+	                    [i1, i2, ...]  List of integers indicating band indices
+	                                   of bands in A sector. Input is sequence
+	                                   of integers with explicit signs, e.g.,
+	                                   '-2 -1 +1 +2'.
+	"""
+	bhzarg = [None, None, None]
+	bhzintarg = []
+	bhzbandarg = []
+	if "bhz" in sysargv:
+		argn = sysargv.index("bhz")
+		sysargv.setparsed(argn)
+		if argn + 1 >= len(sysargv):
+			sys.stderr.write("Warning (cmdargs.bhz): Argument \'bhz\' should be followed by at least one argument\n")
+		for arg in sysargv[argn + 1:]:
+			m = re.match(r'[\+\-][0-9]+', arg)
+			if m is not None:
+				bhzbandarg.append(int(arg))
+				sysargv.setparsednext(1)
+				continue
+			m = re.match(r'[0-9]+', arg)
+			if m is not None:
+				bhzintarg.append(int(arg))
+				sysargv.setparsednext(1)
+				continue
+			m = re.match(r'[eElLhH][0-9]+[\+\-]?', arg)
+			if m is not None:
+				bhzbandarg.append(arg.upper())
+				sysargv.setparsednext(1)
+				continue
+			break
+		if len(bhzintarg) == 1 and len(bhzbandarg) == 0:
+			if bhzintarg[0] < 2 or (bhzintarg[0] % 2) == 1:
+				sys.stderr.write("Warning (cmdargs.bhz): Numbers following argument \'bhz\' should be nonzero, even integers\n")
+			na = 2 * max(bhzintarg[0] // 2, 2)
+			nau = 2 * (na // 4)  # floor division
+			nal = na - nau
+			bhzarg = [None, (nal, nau), None]
+		elif len(bhzintarg) == 2 and len(bhzbandarg) == 0:
+			if bhzintarg[0] + bhzintarg[1] < 2 or (bhzintarg[0] % 2) == 1 or (bhzintarg[0] % 2) == 1:
+				sys.stderr.write("Warning (cmdargs.bhz): Numbers following argument \'bhz\' should be nonzero, even integers\n")
+			nau = 2 * (bhzintarg[1] // 2)  # floor division
+			nal = 2 * (bhzintarg[0] // 2)  # floor division
+			if nal + nau == 0:
+				nal = 2
+			bhzarg = [None, (nal, nau), None]
+		elif len(bhzintarg) == 3 and len(bhzbandarg) == 0:
+			if bhzintarg[1] < 2 or bhzintarg[0] < 2 or bhzintarg[2] < 2 or (bhzintarg[1] % 2) == 1 or (bhzintarg[0] % 2) == 1 or (bhzintarg[2] % 2) == 1:
+				sys.stderr.write("Warning (cmdargs.bhz): Numbers following argument \'bhz\' should be nonzero, even integers\n")
+			na = 2 * max(bhzintarg[1] // 2, 2)
+			nau = 2 * (na // 4)  # floor division
+			nal = na - nau
+			nl = 2 * (bhzintarg[0] // 2)  # floor division
+			nu = 2 * (bhzintarg[2] // 2)  # floor division
+			bhzarg = [nl, [nal, nau], nu]
+		elif len(bhzintarg) == 4 and len(bhzbandarg) == 0:
+			if bhzintarg[0] < 2 or bhzintarg[1] < 2 or bhzintarg[2] < 2 or bhzintarg[3] < 2 or (bhzintarg[0] % 2) == 1 or (bhzintarg[1] % 2) == 1 or (bhzintarg[2] % 2) == 1 or (bhzintarg[3] % 2) == 1:
+				sys.stderr.write("Warning (cmdargs.bhz): Numbers following argument \'bhz\' should be nonzero, even integers\n")
+			nau = 2 * (bhzintarg[2] // 2)  # floor division
+			nal = 2 * (bhzintarg[1] // 2)  # floor division
+			nl = 2 * (bhzintarg[0] // 2)  # floor division
+			nu = 2 * (bhzintarg[3] // 2)  # floor division
+			bhzarg = [nl, (nal, nau), nu]
+		elif len(bhzintarg) == 0 and len(bhzbandarg) > 0:
+			bhzarg = [None, bhzbandarg, None]
+		elif len(bhzintarg) == 2 and len(bhzbandarg) > 0:
+			if bhzintarg[1] < 2 or bhzintarg[0] < 2 or (bhzintarg[1] % 2) == 1 or (bhzintarg[0] % 2) == 1:
+				sys.stderr.write("Warning (cmdargs.bhz): Numbers following argument \'bhz\' should be nonzero, even integers\n")
+			nl = 2 * (bhzintarg[0] // 2)  # floor division
+			nu = 2 * (bhzintarg[1] // 2)  # floor division
+			bhzarg = [nl, bhzbandarg, nu]
+		else:
+			sys.stderr.write("Warning (cmdargs.bhz): Illegal combination of band ids and amounts. Default to four-band model.\n")
+			bhzarg = [None, (2, 2), None]
+	else:
+		sys.stderr.write("Warning (cmdargs.bhz): Argument \'bhz\' is absent. Default to four-band model.\n")
+		bhzarg = [None, (2, 2), None]
+	return bhzarg
+
+def bandalign(directory = None):
+	"""Parse command-line arguments for band alignment (aka 'reconnection') parameters
+
+	Argument:
+	directory   String or None. Directory to seek for the input file. If None,
+	            use the current directory.
+
+	Returns:
+	A dict instance with the following elements, if the command-line argument is
+	present. This dict should be passed as keyword argument to bandindices() of
+	bandalign.py.
+	e0         Float or None. Energy.
+	g0         Integer or None. Gap index.
+	from_file  String or None. Filename of a '...byband.csv' file.
+	If the command-line argument is absent, return the empty dict {}, not None.
+	For testing whether the argument has been given, use 'if returnvalue:', not
+	'if returnvalue is None:'.
+	"""
+	val, arg = sysargv.getval(['bandalign', 'reconnect'], 2)
+	if val is None:
+		return {}  # see comment in docstring under 'Returns:'
+	e0 = None
+	g0 = None
+	from_file = None
+	# First value: either an energy (float) or a filename
+	try:
+		e0 = float(val[0])
+	except:
+		from_file = val[0]
+	if from_file is not None:
+		if directory is not None:
+			from_file = os.path.join(directory, from_file)
+		if not os.path.isfile(from_file):
+			from_file = None  # silently drop a filename that does not exist
+	# Second value: gap index, only meaningful together with an energy
+	if e0 is not None:
+		try:
+			g0 = int(val[1])
+		except:
+			pass  # best effort: second value absent or not an integer
+	# TODO: second argument should not be marked as parsed if g0 is None
+	return {'e0': e0, 'g0': g0, 'from_file': from_file}
+
+### SOME 'PROCESSING' TOOLS ###
+
+def initialize_opts(opts, init = None, mapping = None, strict = False, update_only = True):
+	"""Merge options dict with its initialization values.
+	The argument init is the dict that is used as initializer, whose values are
+	copied to a new dict, and are subsequently updated from the values of the
+	argument opts. Renaming keys is possible with the argument mapping.
+
+	Arguments:
+	opts         A dict instance with values for options, typically the result
+	             of the cmdargs functions.
+	init         A dict with the default values. If not set (None), then the
+	             initializer is an empty dict.
+	mapping      A dict with 'translations' for the keys in the dict opts. It
+	             should be of the form {'from': 'to', ...}. If mapping[key] is
+	             None, the key is skipped.
+	strict       False or True. If True, do not include any key that is not in
+	             mapping. If True, all keys in opts that are not in mapping are
+	             included as is.
+	update_only  False or True. If True, do not include any key that is not in
+	             init. If False, also accept 'new' keys.
+
+	Returns:
+	A dict instance of the form {'keyword': value, ...} meant to be passed to
+	hamiltonian/diagonalization functions, etc.
+	"""
+	## Default value (mapping)
+	if mapping is None:
+		mapping = {}
+	elif not isinstance(mapping, dict):
+		raise TypeError("Argument mapping must be a dict instance or None")
+
+	## Fill newopts dict with values of init
+	newopts = {}
+	if isinstance(init, dict):
+		for key in init:
+			newopts[key] = init[key]
+	elif init is not None:
+		raise TypeError("Argument init must be a dict instance or None")
+
+	## Fill/update newopts dict with (mapped) values from opts
+	for o in opts:
+		# Key resolution: an explicit mapping entry wins (None drops the key);
+		# unmapped keys pass through as-is unless strict is True.
+		key = mapping[o] if o in mapping else o if not strict else None
+		# With update_only, accept only keys already present in newopts (init)
+		if key is not None and (key in newopts or not update_only):
+			newopts[key] = opts[o]
+
+	return newopts
+
+def format_string(fmt_string, *args, material_format = 'sub'):
+	"""Format a string using variable substitution from arguments.
+	This format follows the 'Python string formatting mini-language', see:
+	  https://docs.python.org/3/library/string.html#format-string-syntax
+	The formatting is restricted to named variables. Positional arguments, like
+	'{0}', '{1}' are not permitted.
+
+	Arguments:
+	fmt_string       String that needs to be parsed. Special case: If the format
+	                 string is "?", then display all available variables and
+	                 return None.
+	*args            Dict instances or class instances that define to_dict().
+	                 These contain the variables for which the values are
+	                 substituted. Typical inputs are params, opts, plotopts.
+	material_format  Style in which to print materials. Default is 'sub'.
+	"""
+	all_variables = {}
+	for arg in args:
+		if isinstance(arg, dict):
+			all_variables.update(arg)
+		elif isinstance(arg, PhysParams):
+			all_variables.update(arg.to_dict(material_format = material_format))
+		elif hasattr(arg, "to_dict"):
+			all_variables.update(arg.to_dict())
+		else:
+			raise TypeError("Argument must be a dict instance ot a class instance that defines to_dict")
+
+	vector_components = {}
+	for v in all_variables:
+		if isinstance(all_variables[v], Vector):
+			vdict = all_variables[v].to_dict(prefix = v + '_', all_components = True)  # vector components and length
+			vector_components.update(vdict)
+	all_variables.update(vector_components)
+
+	if fmt_string == '?':
+		print("Available variables for string formatting: " + ", ".join(sorted(all_variables.keys())))
+		return None
+	try:
+		formatted_string = fmt_string.format(**all_variables)
+	except KeyError:
+		sys.stderr.write("ERROR (format_string): Not a valid variable.\n")
+		sys.stderr.write("Available variables: " + ", ".join(sorted(all_variables.keys())) + "\n")
+		return None
+	except ValueError as err:
+		sys.stderr.write("ERROR (format_string): Error in format string: %s\n" % err)
+		return None
+	except IndexError:
+		sys.stderr.write("ERROR (format_string): Positional arguments are not permitted. Please use variable names.\n")
+		return None
+	except:
+		sys.stderr.write("ERROR (format_string): Unknown error in format string.\n")
+		raise
+	return formatted_string
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/cmdargs/range.py b/kdotpy-v1.0.0/src/kdotpy/cmdargs/range.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c4eb598fe05ef4a7bf915cec1bbfd0fa7694ce1
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/cmdargs/range.py
@@ -0,0 +1,206 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+from .tools import isint, isfloat, CmdArgs
+
+
+def grid(args = 'k', from_argv = None):
+	"""Get value or range from the command line.
+
+	Tokens following the matching argument are parsed into a value or range.
+	Recognized patterns (after the argument keyword):
+	  'v'                  single value v
+	  'start stop / n'     linear range in n equal intervals (n integer)
+	  'start stop / s'     linear range with step size s (s float)
+	  'start stop // n'    quadratically spaced range in n intervals
+	  'stop / n', 'stop / s', 'stop // n'   as above, starting from 0
+	  'start stop i / n'   the single value at index i of 'start stop / n'
+	  'start stop i // n'  the single value at index i of 'start stop // n'
+	  'i * s'              the single value i * s
+
+	Arguments:
+	args       String or list of strings. Which command-line argument(s) to
+	           match.
+	from_argv  List of strings or CmdArgs instance. The list of command-line
+	           arguments that needs to be parsed. Must not be None; a
+	           ValueError is raised otherwise.
+
+	Returns:
+	List of numerical values. On absence of the argument or on error, return an
+	empty list.
+	"""
+	if from_argv is None:
+		raise ValueError("Argument 'from_argv' must not be None")
+	allargs = []
+	if isinstance(args, str):
+		args = [args]
+	for i in range(1, len(from_argv)):
+		if from_argv[i].lower() in args:
+			if isinstance(from_argv, CmdArgs):
+				from_argv.setparsed(i)
+			## gather args:
+			# Collect subsequent tokens that are numbers or the operators
+			# '*', '/', '//'. Combined tokens such as '1/10' are split into
+			# '1', '/', '10'. Stop at the first token that does not fit.
+			for a in from_argv[i+1:]:
+				if isfloat(a) or a == "*" or a == "/" or a == "//":
+					allargs.append(a)
+				elif '*' in a and '/' in a:
+					raise ValueError("Invalid value for argument \"%s\"" % from_argv[i])
+				elif '*' in a:
+					s1 = a.split('*')
+					if s1[0] != "":
+						if isfloat(s1[0]):
+							allargs.append(s1[0])
+						else:
+							break
+
+					for s in s1[1:]:
+						if s != "":
+							if isfloat(s):
+								allargs.append('*')
+								allargs.append(s)
+							else:
+								break
+				elif '/' in a:
+					s1 = a.split('/')
+					if s1[0] != "":
+						if isfloat(s1[0]):
+							allargs.append(s1[0])
+						else:
+							break
+
+					for s in s1[1:]:
+						if s != "":
+							if isfloat(s):
+								allargs.append('/')
+								allargs.append(s)
+							else:
+								break
+				else:
+					break
+				if isinstance(from_argv, CmdArgs):
+					from_argv.setparsednext(1)
+			break
+
+	# Match the collected tokens against the recognized patterns, longest
+	# pattern first. Where an integer and a float pattern both exist, the
+	# integer test must come first, since isfloat() also accepts integers.
+	if len(allargs) >= 5 and isfloat(allargs[0]) and isfloat(allargs[1]) and isint(allargs[2]) and allargs[3] == '/' and isint(allargs[4]):
+		# 'start stop i / n': single value at index i of a linear range
+		kstart = float(allargs[0])
+		kend = float(allargs[1])
+		kidx = int(allargs[2])
+		knum = int(allargs[4])
+		return [kstart + (kend - kstart) * kidx / knum]
+	if len(allargs) >= 5 and isfloat(allargs[0]) and isfloat(allargs[1]) and isint(allargs[2]) and allargs[3] == '//' and isint(allargs[4]):
+		# 'start stop i // n': single value at index i of a quadratic range
+		kstart = float(allargs[0])
+		kend = float(allargs[1])
+		kidx = int(allargs[2])
+		knum = int(allargs[4])
+		return [kstart + (kend - kstart) * (kidx / knum)**2]
+	if len(allargs) >= 4 and isfloat(allargs[0]) and isfloat(allargs[1]) and allargs[2] == '/' and isint(allargs[3]):
+		# 'start stop / n': linear range in n equal intervals
+		kstart = float(allargs[0])
+		kend = float(allargs[1])
+		knum = int(allargs[3])
+		kstep = (kend - kstart) / knum
+		return list(np.arange(kstart, kend + 1e-6 * kstep, kstep))  # small margin so that kend is included
+	if len(allargs) >= 4 and isfloat(allargs[0]) and isfloat(allargs[1]) and allargs[2] == '//' and isint(allargs[3]):
+		# 'start stop // n': quadratically spaced range in n intervals
+		kstart = float(allargs[0])
+		kend = float(allargs[1])
+		knum = int(allargs[3])
+		return list(kstart + (kend - kstart) * np.linspace(0.0, 1.0, knum + 1) ** 2)
+	if len(allargs) >= 4 and isfloat(allargs[0]) and isfloat(allargs[1]) and allargs[2] == '/' and isfloat(allargs[3]):
+		# 'start stop / s': linear range with step size s
+		kstart = float(allargs[0])
+		kend = float(allargs[1])
+		kstep = float(allargs[3])
+		return list(np.arange(kstart, kend + 1e-6 * kstep, kstep))
+	if len(allargs) >= 3 and isfloat(allargs[0]) and allargs[1] == '/' and isint(allargs[2]):
+		# 'stop / n': linear range from 0 in n equal intervals
+		kstart = 0.0
+		kend = float(allargs[0])
+		knum = int(allargs[2])
+		kstep = (kend - kstart) / knum
+		return list(np.arange(kstart, kend + 1e-6 * kstep, kstep))
+	if len(allargs) >= 3 and isfloat(allargs[0]) and allargs[1] == '//' and isint(allargs[2]):
+		# 'stop // n': quadratically spaced range from 0 in n intervals
+		kstart = 0.0
+		kend = float(allargs[0])
+		knum = int(allargs[2])
+		kstep = (kend - kstart) / knum
+		return list(kstart + (kend - kstart) * np.linspace(0.0, 1.0, knum + 1) ** 2)
+	if len(allargs) >= 3 and isfloat(allargs[0]) and allargs[1] == '/' and isfloat(allargs[2]):
+		# 'stop / s': linear range from 0 with step size s
+		kstart = 0.0
+		kend = float(allargs[0])
+		kstep = float(allargs[2])
+		return list(np.arange(kstart, kend + 1e-6 * kstep, kstep))
+	if len(allargs) >= 3 and isint(allargs[0]) and allargs[1] == '*' and isfloat(allargs[2]):
+		# 'i * s': single value i * s
+		kidx = int(allargs[0])
+		kstep = float(allargs[2])
+		return [kidx * kstep]
+	if len(allargs) == 1 and isfloat(allargs[0]):
+		# Single value
+		return [float(allargs[0])]
+	# Argument is absent or its values do not match any recognized pattern
+	# raise ValueError("ERROR: Invalid value for argument \"%s\"" % args[0])
+	return []
+
+def add_epsilon(arr, epsilon, two_sided_only = None):
+	"""Add value +/-epsilon to a range if the range contains zero.
+
+	The value +|epsilon| is inserted if the range contains a positive value <
+	|epsilon| (considered to be zero) and a positive value > |epsilon|. Similar
+	for negative values.
+
+	Arguments:
+	arr             List of numbers. The input values. The values should be
+	                monotonic for this function to work reliably.
+	epsilon         Float or None. If equal to zero or None, do nothing. If
+	                two_sided_only is set (True or False), the sign is
+	                meaningless. If two_sided_only is None, a positive or
+	                negative value is equivalent to two_sided_only being set to
+	                False or True, respectively.
+	two_sided_only  True, False, or None. If True, only insert values if input
+	                is two-sided (contains positive and negative values). If
+	                False, insert values regardless of two-sidedness. If None,
+	                use the sign of epsilon to determine the behaviour.
+
+	Returns:
+	The input list with extra values inserted and approximate zero set to
+	identical 0.0, if the insertion conditions are fulfilled. Otherwise, return
+	input list itself.
+	"""
+	if len(arr) == 0:
+		return arr
+	if epsilon is None or epsilon == 0.0:
+		return arr
+	if two_sided_only is None:
+		# A negative epsilon is shorthand for two_sided_only = True
+		two_sided_only = epsilon < 0.0
+	epsilon = abs(epsilon)
+	if two_sided_only and (np.nanmax(arr) < epsilon or np.nanmin(arr) > -epsilon):
+		# Input is one-sided (all values on one side of zero, up to epsilon)
+		return arr
+	if np.nanmin(np.abs(arr)) > epsilon or np.nanmax(np.abs(arr)) < epsilon:
+		# Either no value that counts as zero, or no value beyond epsilon
+		return arr
+	# The value closest to zero acts as the zero entry; replace it by exactly
+	# 0.0 and insert +/-epsilon towards the neighbouring values.
+	zero_index = np.argmin(np.abs(arr))
+	if zero_index == 0:
+		# Zero at the start: insert epsilon with the sign of the next value
+		return [0.0, epsilon * np.sign(arr[1])] + list(arr[1:])
+	elif zero_index == len(arr) - 1:
+		# Zero at the end: insert epsilon with the sign of the previous value
+		return list(arr[:-1]) + [epsilon * np.sign(arr[-2]), 0.0]
+	else:
+		# Zero in the middle: insert epsilon values on both sides
+		return list(arr[:zero_index]) + [epsilon * np.sign(arr[zero_index - 1]), 0.0, epsilon * np.sign(arr[zero_index + 1])] + list(arr[zero_index + 1:])
diff --git a/kdotpy-v1.0.0/src/kdotpy/cmdargs/tools.py b/kdotpy-v1.0.0/src/kdotpy/cmdargs/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a79f8a48c602433a3f339f84c42c70d421496f6
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/cmdargs/tools.py
@@ -0,0 +1,395 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import re
+import sys
+import os
+import shlex
+
+def isfloat(s):
+	try:
+		float(s)
+	except:
+		return False
+	return True
+
+def isint(s):
+	try:
+		int(s)
+	except:
+		return False
+	return True
+
+def ismaterial(s):
+	"""Regular expression match for a material"""
+	m = re.match(r"(([A-Z][a-z]?)(_?\{?([.0-9]+)%?\}?)?)*$", s.strip())
+	return m is not None
+
+def from_pct(s):
+	"""Parse string with float or percentage as float"""
+	if len(s) >= 2 and s[-1] == '%':
+		s0 = s[:-1]
+		div = 100
+	else:
+		s0 = s
+		div = 1
+	try:
+		val = float(s0) / div
+	except:
+		return None
+	return val
+
+def remove_underscores(s):
+	return s.replace('_', '')
+
+def is_kdotpy_cmd(args):
+	if len(args) < 2:
+		return False
+	arg_path, arg_file = os.path.split(args[0])
+	arg_realpath = os.path.realpath(arg_path)
+	source_realpath = os.path.dirname(os.path.realpath(__file__))
+	iskdotpy = (arg_file == 'kdotpy')
+	ismainpy = (arg_file == '__main__.py' and arg_realpath == source_realpath)
+	isvalidscript = args[1] in ['1d', '2d', 'bulk', 'll', 'bulk-ll', 'merge', 'compare', 'batch', 'test', 'config']
+	return (iskdotpy or ismainpy) and isvalidscript
+
+class CmdArgs(object):
+	"""Container class that tracks parsing of command-line arguments ('rich sys.argv').
+
+	Attributes:
+	argv       List of strings. Arguments 'as is'
+	argvlower  List of strings. Lower case arguments with underscores removed
+	           (for case insensitive comparisons).
+	isparsed   List of boolean values with the same length as argv. For each
+	           argument, whether it has been parsed.
+	idx        Index of the most recently parsed argument.
+	"""
+	def __init__(self, args = None):
+		if args is None:
+			self.argv = sys.argv
+		elif isinstance(args, list):
+			self.argv = args
+		else:
+			raise TypeError("Argument 'args' must be a list or None")
+		# Lower-case, underscore-free copies used for case insensitive matching
+		self.argvlower = [remove_underscores(arg.lower()) for arg in self.argv]
+		self.isparsed = [False for a in self.argv]
+		if len(self.argv) >= 2:
+			# For a recognized 'kdotpy <script>' invocation, mark the program
+			# and script names as parsed from the start.
+			is_kdotpy = is_kdotpy_cmd(self.argv)
+			self.isparsed[0] = is_kdotpy
+			self.isparsed[1] = is_kdotpy
+		self.idx = 0
+
+	def __iter__(self):
+		"""Iterate over the raw (unmodified) arguments."""
+		return iter(self.argv)
+
+	def __getitem__(self, i):
+		"""Return the raw argument at position i."""
+		return self.argv[i]
+
+	def __len__(self):
+		"""Return the number of arguments."""
+		return len(self.argv)
+
+	def setparsed(self, what, value = True):
+		"""Mark arguments (un)parsed
+
+		Arguments:
+		what     Integer, slice, or string. Which argument to mark. If a string,
+		         mark all instances of the string (case insensitive match).
+		value    True, False, or None. Target value, True means parsed, False
+		         means not parsed, None means do not mark.
+		"""
+		if value is None:
+			return
+		if isinstance(what, int):
+			self.isparsed[what] = value
+			self.idx = what
+		elif isinstance(what, slice):
+			for i in range(0, len(self.argv))[what]:
+				self.isparsed[i] = value
+				self.idx = i
+		elif isinstance(what, str):
+			# NOTE(review): comparison is against argvlower, which has had
+			# underscores removed, while what.lower() keeps them; a 'what'
+			# containing underscores can therefore never match. Confirm that
+			# callers pass underscore-free strings.
+			for i, a in enumerate(self.argvlower):
+				if a == what.lower():
+					self.isparsed[i] = value
+					self.idx = i
+		else:
+			raise TypeError("Argument 'what' must be an integer, slice, or string.")
+
+	def setparsednext(self, n, value = True):
+		"""Mark next arguments (un)parsed.
+
+		Arguments:
+		n      Number of arguments to mark, starting at the argument following
+		       the previously marked argument.
+		value  True, False, or None. Target value, True means parsed, False
+		       means not parsed, None means do not mark.
+		"""
+		if value is None:
+			return
+		if not isinstance(n, int):
+			raise TypeError("Argument 'n' must be an integer.")
+		# Clip the marked range at the end of the argument list
+		end = min(self.idx + 1 + n, len(self.argv))
+		for i in range(self.idx + 1, end):
+			self.isparsed[i] = value
+		self.idx = end - 1
+
+	def getparsed(self, value = True):
+		"""Get all arguments that are (not) parsed"""
+		return [arg for isp, arg in zip(self.isparsed, self.argv) if isp is value]
+
+	def unparsed_warning(self, color = True):
+		"""Get a pretty string for non-parsed arguments.
+		Call this at the end of the program.
+
+		Arguments:
+		color   True or False. If True, highlight unparsed arguments with ANSI
+		        escape sequences (bold red). If False, quote unparsed arguments
+		        and abbreviate runs of parsed ones as '...'.
+
+		Returns:
+		String, or None if at most one argument is unparsed.
+		"""
+		n_unparsed = len(self.getparsed(False))
+		if n_unparsed <= 1:
+			# NOTE(review): tolerates a single unparsed argument, presumably
+			# to allow for an unrecognized argv[0]; confirm intent.
+			return None
+		s = "[%s]" % self.argv[0]
+		if color:
+			for isp, arg in zip(self.isparsed[2:], self.argv[2:]):
+				if not isp:
+					s += " \x1b[1;31m" + shlex.quote(arg) + "\x1b[0m"
+				else:
+					s += " " + arg
+		else:
+			for j in range(2, len(self.argv)):
+				if not self.isparsed[j]:
+					if self.isparsed[j-1]:
+						s += ' ... ' + shlex.quote(self.argv[j])
+					else:
+						s += ' ' + shlex.quote(self.argv[j])
+		return s
+
+	def has(self, arg, setparsed = True):
+		"""Return whether arg is in the list of arguments.
+		Comparison is case insensitive.
+
+		Arguments:
+		arg         Value to test
+		setparsed   True or False. Whether to mark the argument as parsed.
+
+		Returns:
+		True or False
+		"""
+		# NOTE(review): matches against argvlower (underscores stripped), so
+		# arg is expected not to contain underscores; confirm with callers.
+		if setparsed and arg.lower() in self.argvlower:
+			self.setparsed(arg)
+		return arg.lower() in self.argvlower
+
+	def __contains__(self, arg):
+		# Note: membership testing ('arg in cmdargs') marks the argument as
+		# parsed as a side effect.
+		return self.has(arg)
+
+	def index(self, arg):
+		"""Return position of the first case insensitive match of arg.
+		Raises ValueError (from list.index) if arg is absent."""
+		return self.argvlower.index(arg.lower())
+
+	def getval(self, arg, n=1, mark = True):
+		"""Get value for 'arg value' in argument sequence self.argv
+
+		Arguments:
+		arg    String or list of strings. The command-line argument(s) that
+		       match(es). Matched against the lower-case argument list, so arg
+		       is expected to be lower case.
+		n      Integer. Number of values after the command-line argument 'arg'
+		       that will be returned. If self.argv is not long enough, then
+		       return all values till the end of self.argv.
+		mark   True, False, or None. If True or False, mark this argument parsed
+		       or not parsed, respectively. If None, do not mark.
+
+		Returns:
+		values       String (n=1), list of strings (n>1) or None (if the
+		             matching argument is the last in self.argv).
+		matched_arg  The command-line argument that matches. Empty string if
+		             there is no match.
+		"""
+		if isinstance(arg, str):
+			for i in range(2, len(self.argv)):
+				if self.argvlower[i] == arg:
+					self.setparsed(i, value = mark)
+					if i == len(self.argv) - 1:
+						# Matching argument is the last one; no value follows
+						return None, self.argv[i]
+					elif n <= 1:
+						self.setparsednext(1, value = mark)
+						return self.argv[i+1], self.argv[i]
+					elif i + 1 + n < len(self.argv):
+						self.setparsednext(n, value = mark)
+						return self.argv[i+1:i+1+n], self.argv[i]
+					else:
+						# Fewer than n values available; return what is there
+						self.setparsednext(n, value = mark)
+						return self.argv[i+1:], self.argv[i]
+		elif isinstance(arg, list):
+			for i in range(2, len(self.argv)):
+				if self.argvlower[i].lower() in arg:
+					self.setparsed(i, value = mark)
+					if i == len(self.argv) - 1:
+						return None, self.argv[i]
+					elif n <= 1:
+						self.setparsednext(1, value = mark)
+						return self.argv[i+1], self.argv[i]
+					elif i + 1 + n < len(self.argv):
+						self.setparsednext(n, value = mark)
+						return self.argv[i+1:i+1+n], self.argv[i]
+					else:
+						self.setparsednext(n, value = mark)
+						return self.argv[i+1:], self.argv[i]
+		return None, ""
+
+	def getint(self, arg, default = None, limit = None):
+		"""Get integer value in argument sequence self.argv
+
+		Arguments:
+		arg        String or list of strings
+		default    Return value if arg is not found
+		limit      None or 2-element list [min, max]. If set, a value less than
+		           the lower bound or greater than the upper bound terminates
+		           the program with an error. Either bound may be None.
+
+		Returns:
+		An integer
+		"""
+		retval = default
+		val, arg = self.getval(arg)
+		if val is None:
+			pass
+		elif isint(val):
+			retval = int(val)
+		else:
+			sys.stderr.write("ERROR (cmdargs.getint): Invalid value for argument \"%s\"\n" % arg)
+			exit(1)
+		if retval is not None and isinstance(limit, list) and len(limit) == 2:
+			if limit[0] is not None and retval < limit[0]:
+				sys.stderr.write("ERROR (cmdargs.getint): Value for argument \"%s\" out of bounds\n" % arg)
+				exit(1)
+			if limit[1] is not None and retval > limit[1]:
+				sys.stderr.write("ERROR (cmdargs.getint): Value for argument \"%s\" out of bounds\n" % arg)
+				exit(1)
+		return retval
+
+	def getfloat(self, arg, default = None, limit = None):
+		"""Get numeric (float) value in argument sequence self.argv
+
+		Arguments:
+		arg        String or list of strings
+		default    Return value if arg is not found
+		limit      None or 2-element list [min, max]. If set, a value less than
+		           the lower bound or greater than the upper bound terminates
+		           the program with an error. Either bound may be None.
+
+		Returns:
+		A float
+		"""
+		retval = default
+		val, arg = self.getval(arg)
+		if val is None:
+			pass
+		elif isfloat(val):
+			retval = float(val)
+		else:
+			sys.stderr.write("ERROR (cmdargs.getfloat): Invalid value for argument \"%s\"\n" % arg)
+			exit(1)
+		if retval is not None and isinstance(limit, list) and len(limit) == 2:
+			# Allow a small numerical tolerance at the bounds
+			if limit[0] is not None and retval < limit[0] - 1e-9:
+				sys.stderr.write("ERROR (cmdargs.getfloat): Value for argument \"%s\" out of bounds\n" % arg)
+				exit(1)
+			if limit[1] is not None and retval > limit[1] + 1e-9:
+				sys.stderr.write("ERROR (cmdargs.getfloat): Value for argument \"%s\" out of bounds\n" % arg)
+				exit(1)
+		return retval
+
+	def getfloats(self, arg, positive = False):
+		"""Get a sequence of numeric (float) values in argument sequence self.argv
+		Get all numeric values after the matching argument. If one argument
+		appears repeatedly, concatenate all the values.
+
+		Examples:
+		'arg 1 2.0 -1.0 foo ...' yields [1.0, 2.0, -1.0]
+		'arg 1 2.0 -1.0 foo ... arg 3 bar ...' yields [1.0, 2.0, -1.0, 3.0]
+
+		Arguments:
+		arg        String or list of strings
+		positive   False or True. If True, negative values terminate the
+		           program with an error.
+
+		Returns:
+		A list of floats.
+		"""
+		if isinstance(arg, str):
+			arg = [arg]
+		elif not isinstance(arg, list):
+			raise TypeError("arg must be a str or list instance")
+
+		retval = []
+		argn = 2
+		while argn < len(self.argv):
+			if self.argvlower[argn] in arg:
+				arg_kw = self.argvlower[argn]
+				self.setparsed(argn)
+				# Consume following tokens until the first non-numeric one
+				while argn < len(self.argv):
+					if argn + 1 >= len(self.argv):
+						break
+					try:
+						arg1 = float(self.argv[argn+1])
+					except:
+						arg1 = None
+					if arg1 is None:
+						break
+					self.setparsed(argn + 1)
+					if positive and arg1 is not None and arg1 < 0.0:
+						sys.stderr.write("ERROR (cmdargs.getfloats): Values for argument '%s' must not be negative.\n" % arg_kw)
+						exit(1)
+					retval.append(arg1)
+					argn += 1
+			argn += 1
+		return retval
+
+	def getval_after(self, idx):
+		"""Get generic value coming after position idx and mark idx and idx + 1 parsed.
+		Terminates the program if there is no argument at position idx + 1."""
+		self.setparsed(idx)
+		try:
+			retval = self.argv[idx + 1]
+		except:
+			# Bare except covers the IndexError for a missing value
+			sys.stderr.write("ERROR (cmdargs.getval_after): Absent value for argument \"%s\"\n" % self.argv[idx])
+			exit(1)
+		else:
+			self.setparsednext(1)
+		return retval
+
+	def getfloat_after(self, idx):
+		"""Get numerical value coming after position idx and mark idx and idx + 1 parsed.
+		Terminates the program if the value is absent or not numeric."""
+		self.setparsed(idx)
+		try:
+			retval = float(self.argv[idx + 1])
+		except:
+			# Bare except covers IndexError (missing) and ValueError (invalid)
+			sys.stderr.write("ERROR (cmdargs.getfloat_after): Absent or invalid value for argument \"%s\"\n" % self.argv[idx])
+			exit(1)
+		else:
+			self.setparsednext(1)
+		return retval
diff --git a/kdotpy-v1.0.0/src/kdotpy/cnp.py b/kdotpy-v1.0.0/src/kdotpy/cnp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b3df93c5aca901b5cae7cc9bc005150e6dd3d70
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/cnp.py
@@ -0,0 +1,418 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from .config import get_config_bool
+from .types import DiagDataPoint
+
+### CHARGE NEUTRALITY POINT ###
+
+def ismonotonic(ls, increasing = None, strict = False):
+	"""Is an array monotonic?
+
+	Arguments:
+	ls           List/array/tuple of numerical values.
+	increasing   True, False, or None. If True, check for an increasing array.
+	             If False, check for a decreasing array. If None, check for
+	             either.
+	strict       True or False. If True, check for strict inequality (< or >).
+	             If Flase, check for <= or >=.
+
+	Returns:
+	True or False.
+	"""
+	if not isinstance(ls, (list, tuple, np.ndarray)):
+		raise ValueError("Input should be a list, tuple, or array.")
+	if len(ls) <= 1:
+		return False
+	d = np.diff(np.array(ls))
+	if increasing is None:
+		if strict:
+			return np.all(d > 0) or np.all(d < 0)
+		else:
+			return np.all(d >= 0) or np.all(d <= 0)
+	elif increasing:
+		if strict:
+			return np.all(d > 0)
+		else:
+			return np.all(d >= 0)
+	else:  # decreasing
+		if strict:
+			return np.all(d < 0)
+		else:
+			return np.all(d <= 0)
+
+def lastindex(ls, x):
+	"""Find the index of the last instance of x in the list ls."""
+	if x not in ls:
+		return None
+	else:
+		return len(ls) - 1 - ls[::-1].index(x)
+
+def parse_bandlabel(b):
+	"""Split band label b (string) into 3-tuple (E/L/H, n, +/-) where n is an integer."""
+	if len(b) < 3:
+		return (None, None, None)
+	elif b[-1] == '?' and b[-2] != '?':
+		return (b[0], b[1:-2], b[-2])
+	else:
+		return (b[0], b[1:-1], b[-1])
+
+def estimate_charge_neutrality_point_legacy(params, data = None, print_gap_message = True):
+	"""Estimate charge neutrality point from band characters (legacy function).
+
+	Arguments:
+	params             PhysParams instance
+	data               DiagDataPoint instance or None. Must be at k = 0.
+	print_gap_message  True or False. If True, print an information message to
+	                   stdout stating which gap is the charge neutral gap
+
+	Returns:
+	ecnp   Float or None. If successful, the charge neutral energy. None on
+	       failure.
+	"""
+	ecnp = None
+	# Check if data is a DiagDataPoint instance at k=0
+	if data is None:
+		sys.stderr.write("ERROR (estimate_charge_neutrality_point_legacy): Could not determine charge neutrality point: No data.\n")
+		return None
+	if not isinstance(data, DiagDataPoint):
+		raise TypeError("Argument data must be a DiagDataPoint instance")
+	if data.k != 0:
+		sys.stderr.write("ERROR (estimate_charge_neutrality_point_legacy): Could not determine charge neutrality point: Data not at k=0.\n")
+		return None
+
+	data1 = data.sort_by_eival()
+	eival = list(data1.eival)
+	if params is not None and params.nz == 1:  # bulk mode
+		# In bulk mode, the CNP lies at a fixed position in the sorted
+		# eigenvalue list, between valence and conduction orbitals.
+		if len(eival) != params.norbitals:
+			raise ValueError("In bulk mode, number of eigenvalues must be equal to number of orbitals")
+		if params.norbitals == 8:
+			return (eival[5] + eival[6]) / 2  # 8 orbitals: indices -6 -5 -4 -3 -2 -1 1 2
+		elif params.norbitals == 6:
+			return (eival[3] + eival[4]) / 2  # 6 orbitals: indices -4 -3 -2 -1 1 2
+		else:
+			raise ValueError("Number of orbitals must be either 6 or 8")
+	try:
+		bandtypes = list(data1.char)
+	except TypeError:
+		raise ValueError("Band character data not available")
+	if "verbose" in sys.argv:
+		for e, bt in zip(reversed(eival), reversed(bandtypes)):
+			print("%8.3f  %s" % (e, bt))
+	# Parse labels like 'E1+' into tuples like ('E', '1', '+')
+	bt = [parse_bandlabel(b) for b in bandtypes]
+
+	# Check order of E bands
+	epidx = [int(b[1]) for b in bt if (b[0] == 'E' and b[2] == '+')]
+	emidx = [int(b[1]) for b in bt if (b[0] == 'E' and b[2] == '-')]
+	if len(epidx) == 0 or len(emidx) == 0:
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Could not determine charge neutrality point. No E+ or E- bands.\n")
+		return None
+	epmax = None if len(epidx) == 0 else max(epidx)
+	emmax = None if len(emidx) == 0 else max(emidx)
+
+	# If the E+ or E- bands are not arranged monotonically, then try a different
+	# counting strategy, by ignoring all E bands below L1
+	if not ismonotonic(epidx, increasing = True, strict = True):
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E+ bands not monotonic. Trying alternative strategy ignoring E+ bands below L1+.\n")
+		if ('L', '1', '+') in bt:
+			lpidx = bt.index(('L', '1', '+'))
+			epidx = [int(b[1]) for b in bt[lpidx:] if (b[0] == 'E' and b[2] == '+')]
+			epmax = None if len(epidx) == 0 else max(epidx)
+	if not ismonotonic(emidx, increasing = True, strict = True):
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E- bands not monotonic. Trying alternative strategy ignoring E- bands below L1-.\n")
+		if ('L', '1', '-') in bt:
+			lmidx = bt.index(('L', '1', '-'))
+			emidx = [int(b[1]) for b in bt[lmidx:] if (b[0] == 'E' and b[2] == '-')]
+			emmax = None if len(emidx) == 0 else max(emidx)
+
+	# If the E+ or E- bands are not arranged monotonically, then try a different
+	# counting strategy, by finding a sequence of at least three monotonic E
+	# bands
+	if not ismonotonic(epidx, increasing = True, strict = True):
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E+ bands not monotonic\n")
+		if len(epidx) >= 2 and epidx[-2] < epidx[-1]:
+			# Walk backwards while the trailing indices are strictly
+			# increasing; the loop terminates at the non-monotonic pair that
+			# must exist in this branch.
+			epmon = 2
+			while epidx[-epmon] < epidx[-epmon+1]:
+				epmon += 1
+			epmon -= 1
+			if epmon >= 3:
+				sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Alternative counting strategy using sufficiently long monotonic sequence of E+ bands\n")
+				epmax = epidx[-1]
+			else:
+				sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Too few monotonic E+ bands; increasing 'neig' and/or 'targetenergy' may fix this issue\n")
+		else:
+			sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Too few monotonic E+ bands; increasing 'neig' and/or 'targetenergy' may fix this issue\n")
+
+	if not ismonotonic(emidx, increasing = True, strict = True):
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E- bands not monotonic\n")
+		if len(emidx) >= 2 and emidx[-2] < emidx[-1]:
+			emmon = 2
+			while emidx[-emmon] < emidx[-emmon+1]:
+				emmon += 1
+			emmon -= 1
+			if emmon >= 3:
+				sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Alternative counting strategy using sufficiently long monotonic sequence of E- bands\n")
+				emmax = emidx[-1]
+			else:
+				sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy):  Too few monotonic E- bands; increasing 'neig' and/or 'targetenergy' may fix this issue\n")
+		else:
+			sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Too few monotonic E- bands; increasing 'neig' and/or 'targetenergy' may fix this issue\n")
+
+	# Positions of the highest 'good' E+ and E- bands in the sorted band list
+	j_ep = lastindex(bt, ('E', str(epmax), '+'))
+	j_em = lastindex(bt, ('E', str(emmax), '-'))
+
+	# Check if E bands are above all other bands
+	if j_ep is None or j_em is None:
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E+ and/or E- bands not present\n")
+	# NOTE(review): epmax and/or emmax may be None here if the alternative
+	# counting strategies above emptied the index lists; '%i' formatting below
+	# (and eival[j_ep] further down) would then raise. Confirm whether this
+	# can occur in practice.
+	if j_ep != len(bt) - 1 and j_em != len(bt) - 1:
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Bands above E%i+ and E%i-\n" % (epmax, emmax))
+
+	# Check order of H bands
+	hpidx = [int(b[1]) for b in bt if (b[0] == 'H' and b[2] == '+')]
+	hmidx = [int(b[1]) for b in bt if (b[0] == 'H' and b[2] == '-')]
+	hpmin = None if len(hpidx) == 0 else min(hpidx)
+	hmmin = None if len(hmidx) == 0 else min(hmidx)
+	if not ismonotonic(hpidx, increasing = False, strict = True) or not ismonotonic(hmidx, increasing = False, strict = True):
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): H bands not monotonic\n")
+
+	if hpmin is None or hmmin is None or hpmin > 1 or hmmin > 1:
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): H1+/- band not present\n")
+	else:
+		j_hp = bt.index(('H', str(hpmin), '+'))
+		j_hm = bt.index(('H', str(hmmin), '-'))
+		if eival[j_ep] <= max(eival[j_hp], eival[j_hm]):
+			sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E%i+ not above H1\n" % epmax)
+		if eival[j_em] <= max(eival[j_hp], eival[j_hm]):
+			sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): E%i- not above H1\n" % emmax)
+		dj = epmax + emmax
+		if dj > len(bt) - 1:
+			sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): Neutral point not in energy range\n")
+		else:
+			# Count down dj = epmax + emmax steps from the highest 'good' E band
+			j_above_gap = max(j_ep, j_em) + 1 - dj
+			j_below_gap = j_above_gap - 1
+			if print_gap_message:
+				sys.stdout.write("Charge neutrality point between %s (%.2f meV) and %s (%.2f meV)\n" % (bandtypes[j_below_gap], eival[j_below_gap], bandtypes[j_above_gap], eival[j_above_gap]))  # TODO: decide on whether to write to stdout or stderr
+			ecnp = (eival[j_below_gap] + eival[j_above_gap]) / 2.0
+	if ecnp is None:
+		sys.stderr.write("Warning (estimate_charge_neutrality_point_legacy): General error in calculating charge neutrality point\n")
+	return ecnp
+
def estimate_charge_neutrality_point(params, data=None, print_gap_message=True):
	"""Estimate charge neutrality point from band characters.

	Arguments:
	params             PhysParams instance
	data               DiagDataPoint instance
	print_gap_message  True or False. If True, print an information message to
	                   stdout stating which gap is the charge neutral gap

	Returns:
	ecnp   Float or None. If successful, the charge neutral energy. None on
	       failure.
	"""
	# Depending on configuration, call legacy method instead
	if get_config_bool('cnp_legacy_method'):
		return estimate_charge_neutrality_point_legacy(
			params, data=data, print_gap_message=print_gap_message)

	ecnp = None
	# Check if data is a DiagDataPoint instance at k=0
	if data is None:
		sys.stderr.write("ERROR (estimate_charge_neutrality_point): Could not determine charge neutrality point: No data.\n")
		return None
	if not isinstance(data, DiagDataPoint):
		raise TypeError("Argument data must be a DiagDataPoint instance")
	if data.k != 0:
		sys.stderr.write("ERROR (estimate_charge_neutrality_point): Could not determine charge neutrality point: Data not at k=0.\n")
		return None
	if data.char is None:
		raise ValueError("Band character data not available")

	# Sort by energy and restrict to the largest consecutive window of bands
	# whose characters appear in the expected order.
	sorted_data = data.sort_by_eival()
	data_selector = check_char_in_order(sorted_data)
	window_selector = get_confidence_window(data_selector)
	if not all(window_selector):
		sys.stderr.write("Warning (estimate_charge_neutrality_point): Non-monotonic band ordering detected. Continuing with filtered bands.\n")

	eival = list(sorted_data.eival[window_selector])
	if params is not None and params.nz == 1:  # bulk mode
		if len(eival) != params.norbitals:
			raise ValueError("In bulk mode, number of eigenvalues must be equal to number of orbitals")
		if params.norbitals == 8:
			return (eival[5] + eival[6]) / 2  # 8 orbitals: indices -6 -5 -4 -3 -2 -1 1 2
		elif params.norbitals == 6:
			return (eival[3] + eival[4]) / 2  # 6 orbitals: indices -4 -3 -2 -1 1 2
		else:
			raise ValueError("Number of orbitals must be either 6 or 8")
	try:
		bandtypes = list(sorted_data.char[window_selector])
	except Exception:
		# Note: do not use a bare except here; it would also swallow
		# KeyboardInterrupt and SystemExit.
		raise ValueError("Band character data not available")
	if "verbose" in sys.argv:
		# Print bands from highest to lowest energy for debugging
		for e, b in zip(reversed(eival), reversed(bandtypes)):
			print("%8.3f  %s" % (e, b))
	bt = [parse_bandlabel(b) for b in bandtypes]

	epmax, emmax = get_highest_subband_index(bt, 'E')

	# index of e subband with highest indexnr
	j_ep = lastindex(bt, ('E', str(epmax), '+'))
	j_em = lastindex(bt, ('E', str(emmax), '-'))

	if j_ep is None or j_em is None:
		sys.stderr.write("ERROR (estimate_charge_neutrality_point): Failed, because E+ and/or E- bands are missing\n")
		return None
	if epmax < 2 or emmax < 2:
		sys.stderr.write("ERROR (estimate_charge_neutrality_point): It is required that En+ and En- are present with n >= 2.\n")
		# The output can be correct if only one of E2+, E2- is present, but let us play it safe.
		return None

	# Count down dj = epmax + emmax steps from the highest 'good' E band
	dj = epmax + emmax
	j_above_gap = max(j_ep, j_em) + 1 - dj
	j_below_gap = j_above_gap - 1
	if j_above_gap <= 0:
		sys.stderr.write("ERROR (estimate_charge_neutrality_point): Failed, because neutral point does not lie in energy range\n")
		return None

	if print_gap_message:
		sys.stdout.write("Charge neutrality point between %s (%.2f meV) and %s (%.2f meV)\n" % (bandtypes[j_below_gap], eival[j_below_gap], bandtypes[j_above_gap], eival[j_above_gap]))  # TODO: decide on whether to write to stdout or stderr
	if 'H1' not in bandtypes[j_above_gap] and 'H1' not in bandtypes[j_below_gap]:
		sys.stderr.write("Warning (estimate_charge_neutrality_point): H1+/- not above or below CNP. Please check band order and CNP position in final result.\n")

	ecnp = (eival[j_below_gap] + eival[j_above_gap]) / 2.0
	return ecnp
+
def to_int_typesafe(s):
	"""Convert s to int, returning -1 instead of raising on failure.

	Used for band-index parsing, where -1 marks an invalid/absent index.
	"""
	try:
		return int(s)
	except (TypeError, ValueError):
		# Only catch conversion failures; a bare except would also swallow
		# KeyboardInterrupt and SystemExit.
		return -1
+
def check_char_in_order(data):
	"""Check for each band character whether it is 'in order'.

	Argument:
	data     DiagDataPoint instance. The eigenvalues should be sorted for the
	         result to make sense.

	Returns
	sel_io   Array with True values for bands that are in order, False for bands
	         that are out-of-order.
	"""
	labels = data.char
	parsed = [parse_bandlabel(lbl) for lbl in labels]
	# Split each label into character (E, L, H, S) and node count; +/- ignored.
	chars = np.array([p[0] if isinstance(p[0], str) else '?' for p in parsed])
	nnodes = np.array([to_int_typesafe(p[1]) for p in parsed])

	positions = np.arange(len(parsed))
	out_of_order = np.zeros_like(labels, dtype = bool)
	for c in ['E', 'L', 'H']:
		# Skip characters for which no valid band is present
		valid = (chars == c) & (nnodes > 0)
		if not valid.any():
			continue

		# Locate the band(s) with the minimal node count for this character
		nn_min = np.amin(nnodes[valid])
		where_min = np.where((chars == c) & (nnodes == nn_min))
		# Bands of the same character that lie on the 'wrong' side of the
		# minimal band are flagged as out-of-order: E bands should not appear
		# before it, L and H bands should not appear after it.
		if c == 'E':
			out_of_order |= (chars == c) & (positions < np.amin(where_min))
		else:
			out_of_order |= (chars == c) & (positions > np.amax(where_min))
	return ~out_of_order  # True for all bands that are in order (= not out-of-order).
+
+
def get_confidence_window(selector):
	"""Create a more strict selector array from existing selector.
	Find biggest chunk of Trues and set everything around to False.

	Arguments:
	selector      Numpy array of boolean dtype. This array labels whether each
	              band is in order (True) or out-of-order (False).

	Returns:
	new_selector  Numpy array of boolean dtype. It contains the largest
	              consecutive block of True values of selector and is
	              False elsewhere.
	"""
	# Nothing selected: nothing to narrow down.
	if not np.any(selector):
		return selector

	# Pad with False on both sides so every run of Trues has both a start and
	# an end transition; transition positions then come in (start, end) pairs.
	padded = np.concatenate(([False], selector, [False]))
	edges = np.flatnonzero(np.diff(padded))
	starts = edges[0::2]
	ends = edges[1::2]

	# Keep only the longest run of consecutive True values; on a tie, the
	# first (lowest-index) run wins.
	best = np.argmax(ends - starts)
	new_selector = np.zeros_like(selector)
	new_selector[starts[best]: ends[best]] = True
	return new_selector
+
+
def get_highest_subband_index(bands, subband_character):
	"""Get highest index for '+' and '-' for subband character.

	Arguments:
	bands               List of tuples from parse_bandlabel
	subband_character   String with character [E, L, H, S]

	Returns:
	max_pm   List of two elements [max_plus, max_minus]: the highest subband
	         index for the '+' and '-' signs, respectively. An element is None
	         if no band with that character and sign is present.
	"""
	max_pm = []
	for sign in ['+', '-']:
		ids = [
				int(band[1]) for band in bands
				if band[0] == subband_character and band[-1] == sign
			]
		# Builtin max with a default avoids special-casing the empty list and
		# returns a plain Python int instead of a numpy scalar.
		max_pm.append(max(ids, default=None))
	return max_pm
diff --git a/kdotpy-v1.0.0/src/kdotpy/config.py b/kdotpy-v1.0.0/src/kdotpy/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..944b44dd850ffd7ce42095b73a517a614f399861
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/config.py
@@ -0,0 +1,787 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import os
+import sys
+import shutil
+from difflib import get_close_matches
+import tempfile
+import re
+import subprocess as subp
+
### INITIAL CONFIGURATION ###

# Configuration directories: ~/.kdotpy (current) and ~/.hgmnte (legacy).
# configpaths collects all directories from which configuration has been
# loaded; more paths may be appended at runtime (see read_config).
configpath = os.path.join(os.path.expanduser('~'), '.kdotpy')
configpath_legacy = os.path.join(os.path.expanduser('~'), '.hgmnte')
configpaths = [configpath, configpath_legacy]
+default_config = {
+	'diag_solver': 'auto',
+	'diag_solver_worker_type': 'auto',
+	'diag_solver_cupy_dtype': 'double',
+	'diag_solver_cupy_iterations': '5',
+	'diag_solver_cupy_gemm_dim_thr': '4e6',
+	'diag_save_binary_ddp': 'false',
+	'err_unexpected_ignore': 'true',
+	'err_unexpected_print_traceback': 'true',
+	'task_retries': '2',
+	'tasks_grouped': 'false',
+	'numpy_printprecision': '6',
+	'numpy_linewidth': '200',
+	'job_monitor_limit': '101',
+	'band_align_exp': '4',
+	'band_align_ndelta_weight': '20',
+	'band_char_node_threshold': '1e-6',
+	'band_char_orbital_threshold': '5e-3',
+	'band_char_use_minmax': 'true',
+	'band_char_make_real': 'false',
+	'bandindices_adiabatic_debug': 'false',
+	'batch_stderr_extension': 'txt',
+	'batch_stdout_extension': 'txt',
+	'batch_float_format': '%s',
+	'berry_dk': '1e-3',
+	'berry_ll_simulate': 'false',
+	'cnp_legacy_method': 'false',
+	'color_dos': 'Blues',
+	'color_idos': 'RdBu_r',
+	'color_localdos': 'cividis,jet',
+	'color_trans': 'hot_r',
+	'color_bindex': 'tab21posneg',
+	'color_indexed': 'tab20alt,tab20',
+	'color_indexedpm': 'tab20',
+	'color_shadedpm': 'bluereddual',
+	'color_ipr': 'inferno_r',
+	'color_energy': 'jet',
+	'color_posobs': 'grayred',
+	'color_sigma': 'inferno_r',
+	'color_symmobs': 'bluered',
+	'color_threehalves': 'yrbc',
+	'color_wf_zy': 'Blues',
+	'csv_style': 'csv',
+	'csv_multi_index': 'tuple',
+	'csv_bandlabel_position': 'top',
+	'fig_matplotlib_style': 'kdotpy.mplstyle',
+	'fig_hsize': '150',
+	'fig_vsize': '100',
+	'fig_lmargin': '20',
+	'fig_rmargin': '4',
+	'fig_bmargin': '12',
+	'fig_tmargin': '3',
+	'fig_charlabel_space': '0.8',
+	'fig_colorbar_space': '30',
+	'fig_colorbar_size': '4',
+	'fig_colorbar_margin': '7.5',
+	'fig_colorbar_method': 'insert',
+	'fig_colorbar_abstwosided': 'true',
+	'fig_colorbar_labelpos': 'center',
+	'fig_extend_xaxis': '0.05',
+	'fig_inset_size': '30',
+	'fig_inset_margin': '3',
+	'fig_inset_color_resolution': '20',
+	'fig_legend_fontsize': 'auto',
+	'fig_spin_arrow_length': '5',
+	'fig_max_arrows': '20',
+	'fig_arrow_color_2d': '#c0c0c0',
+	'fig_ticks_major': 'auto',
+	'fig_ticks_minor': 'none',
+	'fig_unit_format': '[]',
+	'dos_interpolation_points': '100',
+	'dos_energy_points': '1000',
+	'dos_convolution_points': '2000',
+	'dos_print_validity_range': 'true',
+	'dos_print_momentum_multiplier': 'false',
+	'dos_quantity': 'p',
+	'dos_unit': 'nm',
+	'dos_strategy_no_e0': 'dos',
+	'bhz_allow_intermediate_bands': 'false',
+	'bhz_points': '200',
+	'bhz_gfactor': 'false',
+	'bhz_abcdm': 'false',
+	'bhz_ktilde': 'true',
+	'bhz_plotcolor': 'red,blue,black',
+	'bhz_plotstyle': 'dotted',
+	'lattice_regularization': 'false',
+	'lattice_zres_strict': 'true',
+	'magn_epsilon': '-1e-4',
+	'selfcon_full_diag': 'true',
+	'selfcon_acceptable_status': '1',
+	'selfcon_check_chaos_steps': '4',
+	'selfcon_check_orbit_steps': '4',
+	'selfcon_convergent_steps': '5',
+	'selfcon_debug': 'false',
+	'selfcon_diff_norm': 'rms',
+	'selfcon_dynamic_time_step': 'false',
+	'selfcon_erange_from_eivals': 'false',
+	'selfcon_ll_use_broadening': 'false',
+	'selfcon_energy_points': '1000',
+	'selfcon_min_time_step': '0.001',
+	'selfcon_potential_average_zero': 'true',
+	'selfcon_symmetrization_constraint': 'strict',
+	'selfcon_use_init_density': 'false',
+	'transitions_min_amplitude': '0.01',
+	'transitions_min_deltae': '0.1',
+	'transitions_max_deltae': '0',
+	'transitions_dispersion_num': '4',
+	'transitions_broadening_type': 'lorentzian',
+	'transitions_broadening_scale': '2.5',
+	'transitions_all_if_filtered': 'false',
+	'transitions_spectra': 'false',
+	'transitions_plot': 'true',
+	'plot_constdens_color': 'blue',
+	'plot_dispersion_default_color': 'blue',
+	'plot_dispersion_energies': 'true',
+	'plot_dispersion_energies_color': 'black',
+	'plot_dispersion_parameter_text': 'true',
+	'plot_dispersion_stack_by_index': 'false',
+	'plot_dos_color': 'blue',
+	'plot_dos_energies': 'true',
+	'plot_dos_vertical': 'true',
+	'plot_dos_validity_range': 'true',
+	'plot_dos_fill': 'false',
+	'plot_idos_fill': 'false',
+	'plot_dos_units_negexp': 'false',
+	'plot_ecnp': 'false',
+	'plot_rasterize_pcolormesh': 'true',
+	'plot_rxy_hall_slope': 'true',
+	'plot_sdh_markers': 'true',
+	'plot_sdh_markers_color': 'red',
+	'plot_sdh_scale_amount': '0',
+	'plot_transitions_labels': 'true',
+	'plot_transitions_quantity': 'rate',
+	'plot_transitions_max_absorption': '0.05',
+	'plot_transitions_frequency_ticks': 'true',
+	'plot_wf_orbitals_realshift': 'false',
+	'plot_wf_orbitals_order': 'standard',
+	'plot_wf_mat_label_rot': '0',
+	'plot_wf_mat_min_thick_label': '0.15',
+	'plot_wf_zy_format': 'pdf',
+	'plot_wf_zy_bandcolors': 'hsl',
+	'plot_wf_zy_scale': 'separate',
+	'plot_wf_y_scale': 'size',
+	'plot_wf_delete_png': 'true',
+	'plot_wf_together_num': '12',
+	'table_berry_precision': '4',
+	'table_dos_precision': '8',
+	'table_dos_scaling': 'false',
+	'table_dos_units_negexp': 'false',
+	'table_data_label_style': 'plain',
+	'table_data_unit_style': 'plain',
+	'table_dispersion_precision': '5',
+	'table_dispersion_data_label': 'true',
+	'table_dispersion_units': 'true',
+	'table_dispersion_unit_style': 'plain',
+	'table_dispersion_obs_style': 'raw',
+	'table_qz_precision': '5',
+	'table_extrema_precision': '5',
+	'table_absorption_precision': '5',
+	'table_transitions_precision': '6',
+	'table_wf_files': 'csv',
+	'table_wf_precision': '5',
+	'wf_locations_exact_match': 'true',
+	'wf_locations_filename': 'true',
+	'xml_omit_default_config_values': 'false',
+	'xml_shorten_command': 'false',
+}
# The active configuration values (overriding default_config), filled by
# parse_config() via initialize_config().
_config = {}

# Deprecated configuration keys. If these are included in a configuration file
# or on the command line, a warning will be shown.
deprecated_config = ['table_transitions_ratecoeff_unit']
+
def initialize_config(do_write = True, warn_deprecated = True):
	"""Initialize configuration

	Recipe:
	Check legacy config (hgmnterc)
	Check whether kdotpyrc exists and if not, write it with default values
	Read kdotpyrc
	Write new config file (optionally, see argument do_write)
	Load custom configuration from command line (file or values)

	Argument:
	do_write  True or False. If True (default), write a new complete config file
	          after all values have been set; this should be done if this
	          function is called from kdotpy-xx.py. If False, skip this step;
	          this is useful if this function is called in parallel processes on
	          Windows.
	warn_deprecated  True or False. Whether to show a deprecation warning for
	                 deprecated configuration keys. This should be False when
	                 this function is called from kdotpy-config.py.

	No return value.
	"""
	global _config
	check_legacy_config()
	rcfile = os.path.join(configpath, 'kdotpyrc')
	if os.path.exists(rcfile):
		# Read the initial (default-location) config file
		try:
			read_config(rcfile, init=True, warn_deprecated=warn_deprecated)
		except:
			sys.stderr.write("ERROR (initialize_config): Cannot read default config file\n")
			raise
	else:
		sys.stderr.write("Warning (initialize_config): Write new default config file\n")
	# Rewrite config file after initialization, so that possibly new keys are
	# written to the file. It is not necessary to call write_config() upon exit.
	if do_write:
		write_config()
	# Load a custom config file or take values from command line
	cmdargs_config()
	return
+
def check_legacy_config():
	"""Check for legacy config file hgmnterc
	Check whether the config files kdotpyrc (new) and hgmnterc (old) is present.
	If the old one is present and the new one is not, copy and rename.

	No return value.
	"""
	target_cfg = os.path.join(configpath, 'kdotpyrc')
	legacy_cfg = os.path.join(configpath_legacy, 'hgmnterc')
	if os.path.exists(legacy_cfg) and not os.path.exists(target_cfg):
		# Migrate the legacy file: create the new config dir if needed, then copy.
		if not os.path.exists(configpath):
			try:
				os.mkdir(configpath)
			except:
				sys.stderr.write("ERROR (check_legacy_config): Cannot create config path\n")
				raise
		try:
			shutil.copy2(legacy_cfg, target_cfg)
		except:
			sys.stderr.write("Warning (check_legacy_config): Copying of legacy config file to new location ('%s' -> '%s') has failed. Please try it manually.\n" % (legacy_cfg, target_cfg))
		else:
			sys.stderr.write("Warning (check_legacy_config): Copying of legacy config file to new location ('%s' -> '%s') was successful. Please remove the old file manually.\n" % (legacy_cfg, target_cfg))
	if os.path.exists(configpath_legacy):
		# Ask the user to clean up the legacy directory manually.
		entries = os.listdir(configpath_legacy)
		extra_count = len(entries) - (1 if 'hgmnterc' in entries else 0)
		if extra_count > 0:
			sys.stderr.write("Warning (check_legacy_config): Legacy config path '%s' contains other files than the default config file. Please check and move config files to '%s' manually.\n" % (configpath_legacy, configpath))
		elif extra_count == 0:
			sys.stderr.write("Warning (check_legacy_config): Legacy config path '%s' exists but does not contain other files than the default config file. Please remove the directory '%s' manually.\n" % (configpath_legacy, configpath_legacy))
	return
+
+### RETRIEVAL OF CONFIG VALUES ###
+
def get_config(key, choices = None, case_sensitive = False, allow_default = True):
	"""Get (string) configuration value.

	Arguments:
	key      String. Configuration key.
	choices  None or list. If set, raise an error or warning if the
	         configuration value is not an element of the list.
	case_sensitive  False or True. If True, the 'choice' test is done in a case
	                sensitive manner. If False (default), this check is case
	                insensitive.
	allow_default   False or True. If True, a failed 'choice' test will return
	                the default value and raise a warning. If False, raise an
	                error on failed 'choice' test and exit the program.

	Returns:
	String value, or None if the key is unknown.
	"""
	if len(_config) == 0:
		# This happens in parallel processes created with 'spawn' method (only option for Windows).
		# Reload configuration for this process, but do not rewrite the file:
		initialize_config(do_write = False)
	if key in _config:
		val = _config[key]
	elif key in default_config:
		val = default_config[key]
	else:
		val = None
	if choices is None or val is None:
		# Either there are no choices to check against, or the key is unknown
		# (val is None); in the latter case, calling val.lower() below would
		# raise AttributeError.
		return val
	val1 = val if case_sensitive else val.lower()
	if val1 in choices:
		return val
	elif allow_default and key in default_config:
		sys.stderr.write("ERROR (get_config): Invalid value '%s' for configuration option %s. Possible values are: %s. Using default value '%s'.\n" % (val, key, ", ".join(choices), default_config[key]))
		return default_config[key]
	else:
		sys.stderr.write("ERROR (get_config): Invalid value '%s' for configuration option %s. Possible values are: %s\n" % (val, key, ", ".join(choices)))
		exit(1)
+
def get_config_num(key, minval = None, maxval = None):
	"""Get numeric configuration value

	Arguments:
	key      String. Configuration key.
	minval   None or a number. If set, the lower bound. If the actual value is
	         < minval, return minval.
	maxval   None or a number. If set, the upper bound. If the actual value is
	         > maxval, return maxval.

	Returns:
	Number of type float.
	"""
	val = get_config(key)
	if val is None:
		return None
	try:
		val = float(val)
	except (TypeError, ValueError):
		# Only catch conversion failures; a bare except would also swallow
		# KeyboardInterrupt and SystemExit.
		sys.stderr.write("ERROR (get_config_num): Configuration option %s must be a numerical value.\n" % key)
		exit(1)
	if minval is not None and val < minval:
		sys.stderr.write("Warning (get_config_num): Configuration option %s must be a numerical value >= %s.\n" % (key, minval))
		val = minval
	if maxval is not None and val > maxval:
		sys.stderr.write("Warning (get_config_num): Configuration option %s must be a numerical value <= %s.\n" % (key, maxval))
		val = maxval
	return val
+
def get_config_num_auto(key, automatic = ['none', 'auto', 'automatic'], minval = None, maxval = None):
	"""Get numeric configuration value or None if the value is set to 'automatic'.

	Arguments:
	key        String. Configuration key.
	automatic  List of strings that evaluate to 'automatic'.
	minval     None or a number. If set, the lower bound. If the actual value is
	           < minval, return minval.
	maxval     None or a number. If set, the upper bound. If the actual value is
	           > maxval, return maxval.

	Development note:
	The default value of argument automatic is not modified, hence safe.

	Returns:
	None or number of type float
	"""
	val = get_config(key)
	if val is None:
		return None
	if len(val) == 0 or val.lower() in automatic:
		return None
	try:
		val = float(val)
	except (TypeError, ValueError):
		# Only catch conversion failures; a bare except would also swallow
		# KeyboardInterrupt and SystemExit.
		sys.stderr.write("ERROR (get_config_num_auto): Configuration option %s must be a numerical value, or one of: %s.\n" % (key, ", ".join(automatic)))
		exit(1)
	if minval is not None and val < minval:
		sys.stderr.write("Warning (get_config_num_auto): Configuration option %s must be a numerical value >= %s, or one of %s.\n" % (key, minval, ", ".join(automatic)))
		val = minval
	if maxval is not None and val > maxval:
		sys.stderr.write("Warning (get_config_num_auto): Configuration option %s must be a numerical value <= %s, or one of %s.\n" % (key, maxval, ", ".join(automatic)))
		val = maxval
	return val
+
def get_config_int(key, minval = None, maxval = None):
	"""Get integer numeric configuration value

	Arguments:
	key      String. Configuration key.
	minval   None or a number. If set, the lower bound. If the actual value is
	         < minval, return minval.
	maxval   None or a number. If set, the upper bound. If the actual value is
	         > maxval, return maxval.

	Returns:
	Number of type int.
	"""
	val = get_config(key)
	if val is None:
		return None
	try:
		val = int(val)
	except (TypeError, ValueError):
		# Only catch conversion failures; a bare except would also swallow
		# KeyboardInterrupt and SystemExit.
		sys.stderr.write("ERROR (get_config_int): Configuration option %s must be an integer value.\n" % key)
		exit(1)
	if minval is not None and val < minval:
		sys.stderr.write("Warning (get_config_int): Configuration option %s must be an integer value >= %s.\n" % (key, minval))
		val = minval
	if maxval is not None and val > maxval:
		sys.stderr.write("Warning (get_config_int): Configuration option %s must be an integer value <= %s.\n" % (key, maxval))
		val = maxval
	return val
+
def get_config_bool(key):
	"""Get boolean configuration value

	Arguments:
	key      String. Configuration key.

	Returns:
	None on error, else True or False.
	"""
	val = get_config(key)
	if val is None:
		return None
	truthy = ['yes', 'y', 'true', 't', '1', 'enabled', 'on']
	falsy = ['no', 'n', 'false', 'f', '0', 'disabled', 'off']
	lowered = val.lower()
	if lowered in truthy:
		return True
	if lowered in falsy:
		return False
	# Neither a recognized true nor false spelling: fatal error
	sys.stderr.write("ERROR (get_config_bool): Configuration option %s must be a boolean value.\n" % key)
	exit(1)
+
def get_all_config(omit_default = True):
	"""Get all configuration values.

	Argument:
	omit_default  False or True (default). If True, exclude all key-value pairs
	              that are set to their default values.

	Returns:
	A dict instance with all (non-default, if applicable) key-value pairs
	"""
	if len(_config) == 0:
		# This happens in parallel processes created with 'spawn' method (only option for Windows).
		# Reload configuration for this process, but do not rewrite the file:
		initialize_config(do_write = False)
	result = {}
	for key in sorted(default_config):
		default_val = default_config[key]
		if key not in _config:
			# Key left at its default value
			if not omit_default:
				result[key] = default_val
		elif not omit_default or _config[key] != default_val:
			result[key] = _config[key]
	return result
+
def set_config(key, val):
	"""Set configuration value; return False if key is not a known option."""
	if key not in default_config:
		return False
	_config[key] = val
	return True
+
def reset_config(key):
	"""Reset configuration value to default value by deleting it from _config"""
	if key not in default_config:
		return False
	# pop with default is a no-op if the key was already at its default
	_config.pop(key, None)
	return True
+
def config_help(keys, helpfile='README', suggest=True):
	"""Show help for configuration values from the help file (formerly README)

	Find lines in the help file that start with a matching configuration key,
	then print to stdout until a non-indented or empty line is found.

	Arguments:
	keys         String or list. If a string, match that one key. If a list,
	             match all keys in the list.
	helpfile     String. The path to the help file (README file).
	suggest      True or False. If True, show help also for suggested
	             alternatives if there are any invalid keys in the input.

	No return value.
	"""
	if isinstance(keys, str):
		keys = [keys]
	if not isinstance(keys, list):
		raise TypeError("Argument keys must be a str or a list instance.")
	valid_keys = [key for key in keys if key in default_config]
	invalid_keys = [key for key in keys if key not in default_config and key not in deprecated_config]
	# Deprecated keys are also considered valid keys in this context, because
	# they still have an entry in the help file.
	if len(invalid_keys) > 0:
		sys.stderr.write("Warning (config_help): The input contains the following invalid key%s: %s.\n" % ("s" if len(invalid_keys) >= 2 else "", ", ".join(invalid_keys)))
		suggestions = suggest_keys(invalid_keys)
		if suggest and len(suggestions) > 0:
			sys.stderr.write("Warning (config_help): Suggested valid keys: " + ", ".join(suggestions) + ".\n")
		valid_keys.extend(suggestions)  # also show help for the suggested keys
	if len(valid_keys) == 0:
		return

	# Scan the help file line by line. A non-indented line that matches the
	# comma-separated-keys pattern and mentions one of the requested keys
	# starts a help entry; the indented lines that follow it are printed.
	# An empty line or a non-matching non-indented line ends the entry.
	key_found = False
	pattern = re.compile(r'([A-Za-z_0-9]+)(,\s*[A-Za-z_0-9]+)*')
	with open(helpfile, 'r') as f:
		for ln in f:
			l = ln.rstrip()
			if len(l) == 0:
				key_found = False
			elif l.startswith(' ') or l.startswith('\t'):
				if key_found:
					print(l)
			elif pattern.match(l) is not None and any(key in l for key in valid_keys):
				key_found = True
				print(l)
			else:
				key_found = False
+
+### CONFIG FILE I/O ###
+
def suggest_keys(invalid_keys):
	"""Return close matches among the valid config keys, without duplicates."""
	suggestions = []
	for bad_key in invalid_keys:
		matches = get_close_matches(bad_key, default_config.keys(), n = 3, cutoff = 0.7)
		# Preserve encounter order while skipping duplicates
		suggestions.extend(m for m in matches if m not in suggestions)
	return suggestions
+
def check_config(keys, suggest=True):
	"""Check if keys are valid config values"""
	if isinstance(keys, str):
		keys = [keys]
	if not isinstance(keys, list):
		raise TypeError("Argument keys must be a str or a list instance.")
	unknown = [k for k in keys if k not in default_config and k not in deprecated_config]
	if len(unknown) > 0:
		plural = "s" if len(unknown) >= 2 else ""
		sys.stderr.write("Warning (check_config): The config contains the following invalid key%s: %s.\n" % (plural, ", ".join(unknown)))
		alternatives = suggest_keys(unknown)
		if suggest and len(alternatives) > 0:
			sys.stderr.write("Warning (check_config): Suggested valid keys: " + ", ".join(alternatives) + ".\n")
	return len(unknown) == 0
+
def cmdargs_config():
	"""Take from command line, either a file name or a string with configuration values.
	Multiple inputs are possible.

	Returns:
	True if a 'config' argument is present on the command line, otherwise
	False. Note: the return value does not indicate parsing success.
	"""
	# Case-insensitive presence test, consistent with the per-argument
	# matching below (previously 'Config' etc. would be silently ignored).
	if not any(arg.lower() == 'config' for arg in sys.argv[2:]):
		return False
	success = False
	for argn, arg in enumerate(sys.argv):
		if arg.lower() == 'config':
			if argn + 1 >= len(sys.argv):
				sys.stderr.write("ERROR (cmdargs_config): Argument 'config' must be followed by a valid file name or configuration values.\n")
				exit(1)
			custom_config = sys.argv[argn + 1]
			if os.path.isfile(custom_config):
				success |= read_config(custom_config)  # read file
			elif os.path.isfile(os.path.join(configpath, custom_config)):
				success |= read_config(os.path.join(configpath, custom_config))
			else:
				config_data = custom_config.split(";")  # take from command line
				success |= parse_config(config_data)
				# Remember cwd as a config search path for values given inline
				if os.getcwd() not in configpaths:
					configpaths.append(os.getcwd())
	return True
+
def read_config(filename, init = False, warn_deprecated = True):
	"""Open and read config file

	Arguments:
	filename         Filename
	init             True if file has to be interpreted as 'initial'
	                 configuration file, i.e., kdotpyrc on the default location.
	                 False if not.
	warn_deprecated  True or False. Whether to show a deprecation warning for
	                 deprecated configuration keys.

	Returns:
	True on success, False on error
	"""
	error_str = "(default)" if init else filename
	try:
		f = open(filename, 'r')
	except OSError:
		sys.stderr.write("ERROR (read_config): Cannot read config file %s\n" % error_str)
		raise
	# 'with' guarantees the file is closed even if parse_config raises
	with f:
		success = parse_config(f, error_str=error_str, warn_deprecated=warn_deprecated)
	# Remember the directory of this config file as a search path for
	# subsequent 'config' command-line arguments.
	filedir = os.path.dirname(os.path.abspath(filename))
	if success and filedir not in configpaths:
		configpaths.append(filedir)
	return success
+
def parse_config(data_or_file, error_str = None, warn_deprecated = True):
	"""Parse configuration from command line input.
	This function works by virtue of a generic iterable argument (data_or_file).
	When iterated over this argument, it yields strings of the form 'key=value',
	which are then parsed to configuration key-value pairs.

	Arguments:
	data_or_file   An iterable whose elements are strings. This may be a list of
	               strings of the form 'key=value' or a file handler (from
	               open(filename, 'r'), for example), among others.
	error_str      None or string. This string will be used in error messages.
	               It should be the filename of the config file. If None, do not
	               print the filename in the error messages.
	warn_deprecated  True or False. Whether to show a deprecation warning for
	                 deprecated configuration keys.

	Returns:
	True on success, False on error
	"""

	global _config
	invalid_keys = []
	deprecated_keys = []
	valid_lines = 0
	for l in data_or_file:
		# Keys consist of lowercase letters, digits, and underscores. The two
		# whitespace groups around '=' are captured but unused here; the same
		# pattern appears in write_config(), where they preserve formatting.
		m = re.match(r'\s*([_a-z0-9]*)(\s*)=(\s*)(.*)', l.strip())
		if m is not None:
			key, sleft, sright, val = m.groups()
			if len(key) == 0:
				pass  # empty key (e.g. a line starting with '='): silently skip
			elif key in default_config:
				# Known key: store the value as a string in the global config
				_config[key] = str(val)
				valid_lines += 1
			elif key in deprecated_config:
				deprecated_keys.append(key)
			else:
				invalid_keys.append(key)

	success = True
	if valid_lines == 0 and error_str != "(default)":  # ignore 'no valid keys' warning for default configuration file
		sys.stderr.write("Warning (parse_config): No valid configuration keys. Check whether %s is a valid configuration %s.\n" % ("this" if error_str is None else error_str, "input" if error_str is None else "file"))
		success = False
	elif len(invalid_keys) > 0:
		sys.stderr.write("Warning (parse_config): The config%s contains the following invalid key%s, which %s been ignored: %s.\n" % ("" if error_str is None else " file " + error_str, "s" if len(invalid_keys) >= 2 else "", "have" if len(invalid_keys) >= 2 else "has", ", ".join(invalid_keys)))
	# Suggestions for invalid keys are printed independently of the if/elif
	# chain above, so they also appear when the 'no valid keys' branch ran.
	if len(invalid_keys) > 0:
		suggestions = suggest_keys(invalid_keys)
		if len(suggestions) > 0:
			sys.stderr.write("Warning (parse_config): Suggested valid keys: " + ", ".join(suggestions) + ".\n")
	if len(deprecated_keys) > 0:
		# Grammar helpers for singular/plural warning messages
		config_str = "config" if error_str is None else f"config file {error_str}"
		key_str = "keys" if len(deprecated_keys) >= 2 else "key"
		has_str = "have" if len(deprecated_keys) >= 2 else "has"
		list_str = ", ".join(deprecated_keys)
		sys.stderr.write(f"Warning (parse_config): The {config_str} contains the following deprecated {key_str}, which {has_str} been ignored: {list_str}.\n")
		if warn_deprecated and error_str == "(default)":
			# Default config file: the user can remove the keys via the CLI
			sys.stderr.write(f"Warning (parse_config): You may disable the {key_str} by using the following command:\n")
			sys.stderr.write("  kdotpy config reset " + " ".join(deprecated_keys) + "\n")
		elif warn_deprecated and error_str is not None:
			# Custom config file: the user has to edit the file manually
			sys.stderr.write(f"Warning (parse_config): Remove the {key_str} from the config file {error_str} manually to silence this warning.\n")

	return success
+
def write_config(deprecate=None):
	"""Write configuration file.

	Rewrite the config file 'kdotpyrc' in the config directory: keys set in the
	current configuration keep their values, unset known keys are included as
	commented-out defaults, and keys listed in deprecate are commented out and
	marked deprecated. The file is replaced atomically via a temporary file.

	Argument:
	deprecate   None or a list. Deprecate all keys in the list.

	Note:
	For kdotpy, it is not necessary to call this function upon exit, because the
	program does not change the configuration by itself.
	"""
	if not os.path.exists(configpath):
		try:
			# makedirs also creates missing parent directories, unlike mkdir
			os.makedirs(configpath)
		except OSError:
			sys.stderr.write("ERROR (write_config): Cannot create config directory\n")
			raise

	# Read existing config file so that it can be rewritten with only the
	# relevant config keys changed, keeping the rest
	config_filename = os.path.join(configpath, 'kdotpyrc')
	config_data = []
	valid_keys = []
	if deprecate is None:
		deprecate = []
	deprecated_keys = []

	if os.path.exists(config_filename):
		with open(config_filename, 'r') as f1:
			for l in f1:
				# Match '[#] key = value'; the whitespace around '=' is kept so
				# the user's formatting is preserved on rewrite
				m = re.match(r'#?\s*([_a-z0-9]*)(\s*)=(\s*)(.*)', l.strip())
				if m is None:
					config_data.append(l)  # keep non-key lines (comments, etc.) as-is
					continue
				key, sleft, sright, val = m.groups()
				if key in _config:
					# Key set in the current configuration: write its value
					valid_keys.append(key)
					val = _config[key]
					config_data.append(f"{key}{sleft}={sright}{val}\n")
				elif key in default_config:
					# Known but unset key: write the default, commented out
					valid_keys.append(key)
					val = default_config[key]
					config_data.append(f"# {key}{sleft}={sright}{val}\n")
				elif key in deprecate and key in deprecated_config:
					# Key to be deprecated: comment out and mark
					deprecated_keys.append(key)
					if not val.endswith("# DEPRECATED"):
						val += "  # DEPRECATED"
					config_data.append(f"# {key}{sleft}={sright}{val}\n")
				else:
					config_data.append(l)

	# Append all known keys that were not in the existing file, sorted by name
	for key in sorted(default_config):
		if key in valid_keys:
			pass
		elif key in _config:
			config_data.append(key + '=' + _config[key] + '\n')
		else:
			config_data.append('# ' + key + '=' + default_config[key] + '\n')

	if len(deprecated_keys) > 0:
		key_str = "keys" if len(deprecated_keys) >= 2 else "key"
		has_str = "have" if len(deprecated_keys) >= 2 else "has"
		list_str = ", ".join(deprecated_keys)
		sys.stderr.write(f"Warning (write_config): The following {key_str} {has_str} been commented out and marked deprecated: {list_str}.\n")

	# Write to a temporary file in the same directory, then atomically replace
	# the config file, so a concurrent reader never sees a partially written file
	try:
		f = tempfile.NamedTemporaryFile('w', dir = configpath, prefix ='kdotpyrc-', delete = False)
	except OSError:
		sys.stderr.write("ERROR (write_config): Cannot create config file\n")
		raise
	tmpname = f.name
	if len(config_data) == 0 or config_data[0].strip() != '## kdotpy configuration file':
		f.write('## kdotpy configuration file\n')
	for l in config_data:
		f.write(l)
	f.close()
	try:
		os.replace(tmpname, config_filename)
	except OSError:
		sys.stderr.write("Warning (write_config): Cannot replace config file. Can be caused by many multiple processes accessing the file simultaneously.\n")
		try:
			os.remove(tmpname)  # do not leave a stale temporary file behind
		except OSError:
			pass
+
def get_editor(default = 'nano'):
	"""Get command for editor from environment variable

	Check the environment variables VISUAL and EDITOR, in that order, and
	return the first one that is set. Fall back to the default otherwise.

	Argument:
	default   String. Editor command to return if neither variable is set.

	Returns:
	String. The editor command.
	"""
	for envvar in ('VISUAL', 'EDITOR'):
		editor = os.environ.get(envvar)
		if editor is not None:
			return editor
	return default
+
def edit_configfile(filename = None):
	"""Edit config file using editor

	Open the file in the editor given by get_editor(), i.e., the command from
	the environment variables VISUAL or EDITOR, with fallback 'nano'.

	Argument:
	filename   None or string. Path of the file to edit. If None, edit the
	           default config file 'kdotpyrc' in the config directory.

	No return value.
	"""
	editor = get_editor()
	if filename is None:
		filename = os.path.join(configpath, 'kdotpyrc')
	try:
		# Blocks until the editor process exits; the exit code is not checked
		subp.run([editor, filename])
	except OSError:
		sys.stderr.write("ERROR (edit_configfile): Unable to open editor.\n")
		raise
	else:
		# Echo the command that was run, for the user's reference
		print("{} {}".format(editor, filename))
	return
+
def get_configfiles():
	"""Return a list of existing 'kdotpyrc' files in all known config paths"""
	found = []
	for path in configpaths:
		if not os.path.exists(path):
			continue
		candidate = os.path.join(path, 'kdotpyrc')
		if os.path.isfile(candidate):
			found.append(candidate)
	return found
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/__init__.py b/kdotpy-v1.0.0/src/kdotpy/density/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59ed5125e9444a771018b9fa1f3a55fc4d7bfb26
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/__init__.py
@@ -0,0 +1,56 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .densitydata import DensityData, DensityDataByBand, IntegratedObservable
+from .densitydata import energy_at_idos
+
+from .densityscale import DensityScale
+
+from .density import integrated_dos, integrated_dos_by_band
+from .density import local_integrated_dos, integrated_dos_ll
+
+from .densityz import densityz_energy, densityz, densityz_ll
+from .densityz import densityz_surface_states
+from .densityz import print_densityz
+
+from .intobs import integrated_observable
+
+from .broadening import BroadeningFunction, opts_to_broadening
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/base.py b/kdotpy-v1.0.0/src/kdotpy/density/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..242379a1d29399fdae8e55b40b8c4fae23f4664d
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/base.py
@@ -0,0 +1,715 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import numpy as np
+import sys
+
+from ..config import get_config_bool
+from ..momentum import VectorGrid, Vector
+from ..parallel import Progress
+from ..types import DiagData
+from ..physconst import eoverhbar
+
+from .broadening import n_step, idos_broadening, BroadeningFunction, MultiBroadening
+from .elements import elementary_triangles, elementary_tetrahedra, interpolate2d, values_over_simplex
+from .elements import linear_idos_element, triangle_idos_element, tetrahedral_idos_element
+from .elements import triangle_area_element, tetrahedral_volume_element
+
+### HELPER FUNCTIONS ###
+
def energy_select(ee, eimin=None, eimax=None):
	"""Select energy range below, between and above eimin and eimax

	Arguments:
	ee      Numpy array. Energy values.
	eimin   None or number. Lower bound; None means unbounded below.
	eimax   None or number. Upper bound; None means unbounded above.

	Returns:
	eebelow   Boolean array. True where ee < eimin.
	eesel     Boolean array. True where eimin <= ee <= eimax.
	eeabove   Boolean array. True where ee > eimax.
	"""
	lower = -np.inf if eimin is None else eimin
	upper = np.inf if eimax is None else eimax
	eebelow = ee < lower
	eeabove = ee > upper
	eesel = (ee >= lower) & (ee <= upper)
	return eebelow, eesel, eeabove
+
def count_nontrivial_bands3d(data, eimin=None, eimax=None):
	"""Determine how many bands lie inside the energy range, i.e., for which
	the DOS calculation is nontrivial.

	Arguments:
	data    DiagData instance. The dispersion data.
	eimin   None or number. Lower bound of the energy range; None means -inf.
	eimax   None or number. Upper bound of the energy range; None means +inf.

	Returns:
	nbands  Integer. Number of bands that are neither entirely below eimin nor
	        entirely above eimax.
	"""
	lower = -np.inf if eimin is None else eimin
	upper = np.inf if eimax is None else eimax
	nbands = 0
	for b in data.get_all_bindex():
		_, ei = data.get_plot_coord(b, "index3d")
		fully_below = np.nanmax(ei) < lower
		fully_above = np.nanmin(ei) > upper
		if not (fully_below or fully_above):
			nbands += 1
	return nbands
+
+### BASIC DOS FUNCTIONS ###
+
def int_dos(data, ee, broadening = None, radial = True):
	"""Integrated density of states

	Calculate the integrated density of states for each k value separately and
	integrate over k space. The DOS may be extracted easily by differentiation.

	Arguments:
	data        DiagData instance. The dispersion data.
	ee          Numpy array. Energy values.
	broadening  Broadening parameter
	radial      True or False. If True, assume one-dimensional input is a polar
	            radius. If False, assume a Cartesian coordinate. This argument
	            has no effect on two- and three-dimensional grids.

	Returns:
	idos        Numpy array. The integrated density as function of energy.
	            None on error.
	"""
	if 'verbose' in sys.argv:
		print('int_dos: broadening', broadening)
	kgrid = data.get_momentum_grid()
	if not isinstance(kgrid, VectorGrid):
		raise TypeError("VectorGrid expected")
	# Integration elements (weights) in momentum space
	da = kgrid.integration_element(full = radial)
	if da is None:
		return None

	# Take the local integrated density of states and integrate over k space
	lidos = loc_int_dos(data, ee, broadening = broadening)
	return None if lidos is None else np.dot(da, lidos)
+
+
def loc_int_dos(data, ee, broadening = None):
	"""Local integrated density of states

	Calculate the integrated density of states for each k value separately.
	The local DOS may be extracted easily by differentiation.

	Arguments:
	data        DiagData instance. The dispersion data.
	ee          Numpy array. Energy values.
	broadening  Broadening parameter

	Returns:
	lidos       Numpy array. The local integrated density as function of k and
	            energy. None on error.
	"""
	if broadening is None:
		broadening = BroadeningFunction('step', 0)
	elif not isinstance(broadening, (BroadeningFunction, MultiBroadening)):
		raise TypeError("Invalid type for broadening argument")
	if 'verbose' in sys.argv:
		print('loc_int_dos: broadening', broadening)

	# Neutral energies are required at every data point; abort otherwise
	e_neutral = data.get_e_neutral(flat=True)
	if e_neutral is None:
		sys.stderr.write("ERROR (loc_int_dos): Cannot find neutral energies for all data points.\n")
		return None

	lidos = np.zeros((len(data), len(ee)), dtype = float)
	for j, ddp in enumerate(data):
		# Count states below the neutral energy at this data point
		n_below = np.count_nonzero(ddp.eival < e_neutral[j])
		# Broadened occupation for all (energy, eigenvalue) combinations;
		# sparse meshgrid keeps the memory footprint small
		eival_mesh, ee_mesh = np.meshgrid(ddp.eival, ee, sparse = True)
		occupation = broadening.occupation(eival_mesh - ee_mesh, index = j)
		lidos[j] = np.sum(occupation, axis = 1) - n_below
	return lidos
+
+
def loc_int_dos_by_band(data, ee, broadening = None, band = None):
	"""Local integrated density of states by band

	Like loc_int_dos(), but do not sum over the bands.

	Arguments:
	data        DiagData instance. The dispersion data.
	ee          Numpy array. Energy values.
	broadening  None or a broadening instance. If not None, apply broadening by
	            convolution of the result (slow; see warning in the code).
	band        None or a band index. If a band index present in the data,
	            restrict the calculation to that band only; otherwise sum over
	            all bands.

	Returns:
	lidos       Numpy array of shape (nk, ne) for 1D grids, (4 * nk, ne) for
	            2D grids, or (12 * nk, ne) for 3D grids, where nk is the number
	            of elementary cells and ne = len(ee). None on error.
	"""
	# If optimize_erange is set to True, restrict the explicit calculations to
	# the energies in each band only (extended by one energy step size) and set
	# the other values to 0, +1, or -1 as appropriate. If set to False, imitate
	# the "old" behaviour by selecting the full energy range. This is slower, so
	# it is recommended for debugging only. This option affects only 1 and 2
	# dimensions, as for 3 dimensions the optimization is already done inside
	# tetrahedral_idos_element().
	optimize_erange = True

	if 'verbose' in sys.argv:
		print(f"loc_int_dos_by_band ({band}): broadening {broadening}")

	bidx = data.get_all_bindex()
	if bidx is None:
		sys.stderr.write("Warning (dos_by_band): Band indices are required but not present.\n")
		return None
	# Select specific band; otherwise sum over all
	if band is not None and band in bidx:
		bidx = [band]

	# Get neutral energies
	e_neutral = data.get_e_neutral()
	if e_neutral is None:
		sys.stderr.write("ERROR (loc_int_dos_by_band): Cannot find neutral energies for all data points.\n")
		return None

	# One-dimensional grid: linear elements between adjacent k points
	if len(data.shape) == 1:
		nk = data.shape[0] - 1
		ne = len(ee)
		# de is the energy step size, used to extend the band's energy window
		de = 0 if ne == 1 else (ee.max() - ee.min()) / (ne - 1)
		lidos = np.zeros((nk, ne))

		for b in bidx:
			_, ei = data.get_plot_coord(b, "index")
			if np.all(np.isnan(ei)):
				sys.stderr.write("Warning (loc_int_dos_by_band): No data for band %s.\n" % b)
				continue
			eimin = np.nanmin(ei) - de if optimize_erange else None
			eimax = np.nanmax(ei) + de if optimize_erange else None
			eebelow, eesel, eeabove = energy_select(ee, eimin=eimin, eimax=eimax)

			# determine whether electron or hole
			electrons = np.all(ei > e_neutral, where=~np.isnan(ei))
			holes = np.all(ei < e_neutral, where=~np.isnan(ei))
			if not electrons and not holes:
				sys.stderr.write(f"ERROR (loc_int_dos_by_band): Band {b} neither electron-like nor hole-like.\n")
				return None

			# Get IDOS element
			if np.count_nonzero(eesel):
				# Pair up band energies at adjacent k points (interval endpoints)
				e1 = np.vstack((ei[1:], ei[:-1])).T
				lidos[:, eesel] += linear_idos_element(e1, ee[eesel], holes = holes)
			# Outside the band's energy window the contribution is constant:
			# +1 above for electrons, -1 below for holes
			if np.count_nonzero(eeabove) and electrons:
				lidos[:, eeabove] += 1.0
			if np.count_nonzero(eebelow) and holes:
				lidos[:, eebelow] += -1.0

	# Two-dimensional grid: triangular elements subdividing each square
	elif len(data.shape) == 2 and data[0].k.vtype in ['xy', 'xyz', 'pol', 'cyl', 'sph']:
		nk = (data.shape[0] - 1) * (data.shape[1] - 1)
		ne = len(ee)
		de = 0 if ne == 1 else (ee.max() - ee.min()) / (ne - 1)
		lidos = np.zeros((4, nk, ne))

		## Define elementary triangles which subdivide an elementary square
		alltriangles = elementary_triangles()

		for b in bidx:
			_, ei = data.get_plot_coord(b, "index2d")
			if np.all(np.isnan(ei)):
				sys.stderr.write("Warning (loc_int_dos_by_band): No data for band %s.\n" % b)
				continue
			eimin = np.nanmin(ei) - de if optimize_erange else None
			eimax = np.nanmax(ei) + de if optimize_erange else None
			eebelow, eesel, eeabove = energy_select(ee, eimin=eimin, eimax=eimax)

			# determine whether electron or hole
			electrons = np.all(ei > e_neutral, where=~np.isnan(ei))
			holes = np.all(ei < e_neutral, where=~np.isnan(ei))
			if not electrons and not holes:
				sys.stderr.write(f"ERROR (loc_int_dos_by_band): Band {b} neither electron-like nor hole-like.\n")
				return None

			# Iterate over four triangular simplices 1 2 5, 1 3 5, 3 4 5, 2 4 5
			# in the elementary square, where the points are labelled as follows.
			# 3   4
			#   5
			# 1   2
			if np.count_nonzero(eesel):
				for j, triangle in enumerate(alltriangles):
					e1 = values_over_simplex(ei, triangle)
					lidos[j][:, eesel] += triangle_idos_element(e1, ee[eesel], holes=holes)
			if np.count_nonzero(eeabove) and electrons:
				lidos[:, :, eeabove] += 1.0
			if np.count_nonzero(eebelow) and holes:
				lidos[:, :, eebelow] += -1.0

		lidos = lidos.reshape((4 * nk, ne))
	# Three-dimensional grid: tetrahedral elements subdividing each cube
	elif len(data.shape) == 3 and data[0].k.vtype in ['xyz', 'cyl', 'sph']:
		nk = (data.shape[0] - 1) * (data.shape[1] - 1) * (data.shape[2] - 1)
		ne = len(ee)
		lidos = np.zeros((12, nk, ne))

		## Define elementary tetrahedra which subdivides an elementary cube
		alltetrahedra = elementary_tetrahedra()
		ntet = len(alltetrahedra)
		# Count bands within the energy window, for the progress indicator only
		nbands = count_nontrivial_bands3d(data, eimin=np.amin(ee), eimax=np.amax(ee))
		progress = Progress("Calculating local integrated DOS by band", nbands * ntet, n_threads = 1)
		jb = 0
		for b in bidx:
			_, ei = data.get_plot_coord(b, "index3d")
			in_eerange = not (np.nanmax(ei) < np.amin(ee) or np.nanmin(ei) > np.amax(ee))

			# determine whether electron or hole
			# (note that where=~np.isnan(ei) is not needed for 3D)
			electrons = np.all(ei > e_neutral)
			holes = np.all(ei < e_neutral)
			if not electrons and not holes:
				sys.stderr.write(f"ERROR (loc_int_dos_by_band): Band {b} neither electron-like nor hole-like.\n")
				return None

			# Iterate over 12 tetrahedral simplices in the elementary cube
			for j, tetrahedron in enumerate(alltetrahedra):
				e1 = values_over_simplex(ei, tetrahedron)
				lidos[j] += tetrahedral_idos_element(e1, ee, holes = holes)
				if in_eerange:
					progress.show(jb * ntet + j + 1)

			# Increase band counter if calculation has been nontrivial
			# (eigenvalues within energy range)
			if in_eerange:
				jb += 1
		lidos = lidos.reshape((12 * nk, ne))
	else:
		sys.stderr.write("Warning (loc_int_dos_by_band): Not implemented for dimensions > 3.\n")
		return None

	if broadening is not None:
		if len(data) > 100:
			sys.stderr.write("Warning (loc_int_dos_by_band): For this method, broadening is implemented through convolution, which is time consuming. If you integrate over space later,  applying the broadening afterwards is much more efficient.\n")
		if 'verbose' in sys.argv:
			print("loc_int_dos_by_band: call idos_broadening", broadening)
		lidos = idos_broadening(lidos, ee, broadening = broadening)

	return lidos
+
+
def int_dos_by_band(
		data, ee, broadening = None, return_dict = False, radial = True,
		psi2z: dict = None, electrons = False, holes = False, same_weights=True):
	"""Integrated density of states by band

	Like int_dos(), but do not sum over the bands.

	Additional arguments:
	psi2z			Dict of 2d-arrays with dimensions [nk, nz] with band indices
	                as keys. The values are the absolute value squared of the
	                wave functions, not integrated over z. Used for calculation
	                of densityz (see corresponding function in density.py).
	same_weights	Boolean. If True (default), the weights when interpolating
					psi2z are identical and the same mean value is applied to
					all da-triangles. If False, for each da-triangle psi2z is
					interpolated individually.

	Returns:
	If psi2z is given: numpy array of shape (nz, len(ee)). If return_dict is
	True: dict of arrays of shape (len(ee),) keyed by band index. Otherwise:
	numpy array of shape (len(ee),). None on error.
	"""

	# At least one of the carrier types must be requested
	if not electrons and not holes:
		raise ValueError("The arguments electrons and holes may not be both False")

	if 'verbose' in sys.argv:
		print('int_dos_by_band: broadening', broadening)

	print_multiplier = get_config_bool('dos_print_momentum_multiplier')

	# First, construct the momentum-space integration elements da for the
	# grid dimension and coordinate type at hand.
	kgrid = data.get_momentum_grid()
	if len(kgrid.shape) == 1:
		if not radial:  # Cartesian coordinate
			kval = kgrid.get_array()[0]
			# kval = np.array([k.x() for k in data.get_momenta()]) #TODO: Other components
			da = np.abs(np.diff(kval))
		else:  # radial coordinate
			kval = np.array([k.len() for k in data.get_momenta()])
			rval = np.array([k.polar()[0] for k in data.get_momenta()])
			mult = 1
			if rval.min() < -1e-8:
				if np.amax(np.abs(rval + rval[::-1])) < 1e-8:  # check if array is symmetric around 0
					mult = 0.5
				else:
					sys.stderr.write("ERROR (int_dos_by_band): One-dimensional array is two-sided and not symmetric. Density cannot be calculated reliably in this case.\n")
					return None
			if print_multiplier:
				print("Multiplier for density (momentum space extension):", mult, '* pi')
			# Annular integration elements pi * |k2^2 - k1^2|
			da = mult * np.pi * np.abs(np.diff(kval**2))
	elif len(kgrid.shape) == 2:
		polar = kgrid.vtype in ['pol', 'cyl', 'sph']
		degrees = None if not polar else kgrid.degrees

		kx, ky = kgrid.get_array()  # any combination of two components
		xx, yy = np.meshgrid(kx, ky, indexing='ij')

		if polar:
			if tuple(kgrid.var) != ('r', 'phi'):
				sys.stderr.write("ERROR (int_dos_by_band): Two-dimensional angular coordinates other than (r, phi) are not supported.\n")
				return None
			if degrees:  # convert to radians
				yy *= np.pi / 180.

		## Define elementary triangles which subdivide an elementary square
		alltriangles = elementary_triangles()

		# Iterate over 4 triangular simplices in the elementary square
		nk = (xx.shape[0] - 1) * (xx.shape[1] - 1)
		da = np.zeros((4, nk))
		for j, triangle in enumerate(alltriangles):
			x1 = values_over_simplex(xx, triangle)
			y1 = values_over_simplex(yy, triangle)
			da[j] = triangle_area_element(x1, y1, polar = polar)
		da = da.flatten()  # equivalent to da.reshape((4 * nk,))

		# Multiplier to extend the covered patch to the full momentum space
		if polar and degrees:
			# Completion for polar coordinates in degrees
			mult = 360. / (max(ky) - min(ky))
		elif polar:
			# Completion for polar coordinates in radians
			mult = 2. * np.pi / (max(ky) - min(ky))
		else:
			# Completion for Cartesian coordinates
			mult = 1.0
			if abs(min(kx)) < 1e-9:
				mult *= 2.0
			if abs(min(ky)) < 1e-9:
				mult *= 2.0
		if print_multiplier:
			print("Multiplier for density (momentum space extension):", mult)
		da *= mult
	elif len(kgrid.shape) == 3:
		spherical = (kgrid.vtype == 'sph')
		cylindrical = (kgrid.vtype == 'cyl')
		degrees = None if not (cylindrical or spherical) else kgrid.degrees
		kx, ky, kz = kgrid.get_array()  # any combination of three components
		xx, yy, zz = np.meshgrid(kx, ky, kz, indexing='ij')

		if cylindrical and degrees:  # convert phi (2nd component) to radians
			yy *= np.pi / 180.
		elif spherical and degrees:  # convert phi and theta (2nd and 3rd components) to radians
			yy *= np.pi / 180.
			zz *= np.pi / 180.

		## Define elementary tetrahedra which subdivides an elementary cube
		alltetrahedra = elementary_tetrahedra()

		# Iterate over 12 tetrahedral simplices in the elementary cube
		nk = (xx.shape[0] - 1) * (xx.shape[1] - 1) * (xx.shape[2] - 1)
		da = np.zeros((12, nk))
		for j, tetrahedron in enumerate(alltetrahedra):
			x1 = values_over_simplex(xx, tetrahedron)
			y1 = values_over_simplex(yy, tetrahedron)
			z1 = values_over_simplex(zz, tetrahedron)
			da[j] = tetrahedral_volume_element(x1, y1, z1, cylindrical = cylindrical, spherical = spherical)
		da = da.flatten()  # equivalent to da.reshape((12 * nk,))

		if cylindrical:
			# Completion for cylindrical coordinates; phi in degrees or radians
			mult = 360. if degrees else 2 * np.pi
			mult /= (max(ky) - min(ky))
			if abs(min(kz)) < 1e-9:  # for the z coordinate
				mult *= 2.0
		elif spherical:
			# Completion for spherical coordinates; phi in degrees or radians
			mult = 360. if degrees else 2 * np.pi
			mult /= (max(kz) - min(kz))
			# For cos(theta)
			# Only consider a factor if theta lies in the interval [0, 180] deg
			# TODO: The extension may only be useful if theta is the interval
			# [0, 90] deg or [90, 180] deg
			aunit = np.pi / 180. if degrees else 1.0
			thetamin, thetamax = np.nanmin(ky) * aunit, np.nanmax(ky) * aunit
			if thetamin > -1e-9 and thetamax > -1e-9 and thetamin < np.pi + 1e-9 and thetamax < np.pi + 1e-9 and thetamax - thetamin > 1e-9:
				delta_cos_theta = np.cos(thetamin) - np.cos(thetamax)
				mult *= 2 / delta_cos_theta
		else:
			# Completion for Cartesian coordinates
			mult = 1.0
			if abs(min(kx)) < 1e-9:
				mult *= 2.0
			if abs(min(ky)) < 1e-9:
				mult *= 2.0
			if abs(min(kz)) < 1e-9:
				mult *= 2.0
		if print_multiplier:
			print("Multiplier for density (momentum space extension):", mult)
		da *= mult
	else:
		return None

	## Use the local integrated density of states (see above) and integrate over space
	## Apply broadening later, i.e., over the integrated array, because it involves the
	## time-consuming operation of convolution.
	## Therefore we use broadening = None at this stage
	bidx = data.get_all_bindex()
	if bidx is None:
		sys.stderr.write("Warning (dos_by_band): Band indices are required but not present.\n")
		return None
	if psi2z is not None:
		# Resolve the density in the z direction, weighted by |psi(z)|^2
		nz = psi2z[bidx[0]].shape[1]

		idos = np.zeros((nz, len(ee)))
		for b in bidx:
			# Skip bands of the carrier type that was not requested
			if not electrons and b > 0:
				continue
			if not holes and b < 0:
				continue
			# Get local integrated density. 2d-array [nk, len(ee)]
			lidos_b = loc_int_dos_by_band(data, ee, broadening = None, band = b)
			if lidos_b is None:
				return None
			lidos_min, lidos_max = lidos_b.min(), lidos_b.max()

			# Skip bands with zero LIDOS
			if lidos_min == 0.0 and lidos_max == 0.0:
				continue

			if len(kgrid.shape) == 1:
				# Interpolate between adjacent points
				interpolated_psi2z = (psi2z[b][:-1] + psi2z[b][1:]) / 2
			elif len(kgrid.shape) == 2:
				# Interpolate from corners of square
				# (see definition in calculation of da above)
				nks = tuple(len(k) for k in kgrid.get_array())
				if same_weights:
					# Same mean for all triangles, hence repeat 4-times
					interpolated_psi2z = np.concatenate([interpolate2d(psi2z[b], nks, weights=[1, 1, 1, 1])] * 4)
				else:
					# Individual mean for each triangle
					interpolated_psi2z = np.concatenate([
						interpolate2d(psi2z[b], nks, weights=weights)
						# triangles [# 1 2 5, # 1 3 5, # 3 4 5, # 2 4 5]
						for weights in [[5, 5, 1, 1], [5, 1, 5, 1], [1, 1, 5, 5], [1, 5, 1, 5]]
					])
			else:
				raise NotImplementedError("int_dos_by_band: Interpolation of psi2z for %dd k-grid not implemented." % (len(kgrid.shape)))
			# Integrate over k-space
			if lidos_max - lidos_min < 1e-12 * min(abs(lidos_min), abs(lidos_max)):  # lidos_b is constant
				lidos_val = (lidos_min + lidos_max) / 2
				idos += np.dot(da, interpolated_psi2z)[:, np.newaxis] * lidos_val
			else:
				# Only consider the energies where LIDOS != 0.
				eesel = (np.amax(np.abs(lidos_b), axis = 0) > 0.0)
				idos[:, eesel] += np.sum((da[:, np.newaxis, np.newaxis] * interpolated_psi2z[:, :, np.newaxis] * lidos_b[:, np.newaxis, eesel]), axis = 0)

		# Apply broadening
		if 'verbose' in sys.argv:
			print("int_dos_by_band: call idos_broadening", broadening)
		idos = idos_broadening(idos, ee, broadening = broadening)

	elif return_dict:
		# Return a dict of IDOS arrays, keyed by band index
		idos = {}
		for b in bidx:
			if not electrons and b > 0:
				continue
			if not holes and b < 0:
				continue
			lidos_b = loc_int_dos_by_band(data, ee, broadening = None, band = b)
			if lidos_b is None:
				return None
			## Integrate over space
			idos[b] = np.dot(da, lidos_b)
			## Apply broadening
			if 'verbose' in sys.argv:
				print("int_dos_by_band: call idos_broadening", broadening)
			idos[b] = idos_broadening(idos[b], ee, broadening = broadening)
	else:
		# Return a single IDOS array, summed over the requested bands
		if not electrons:  # Calculate holes only
			if not any(b < 0 for b in bidx):
				return np.zeros_like(ee)  # Return zero if there are no hole states
			lidos_b = [loc_int_dos_by_band(data, ee, broadening = None, band = b) for b in bidx if b < 0]
			if any(x is None for x in lidos_b):
				return None
			lidos = np.sum(lidos_b, axis = 0)  # sum over bands
		elif not holes:  # Calculate electrons only
			if not any(b > 0 for b in bidx):
				return np.zeros_like(ee)  # Return zero if there are no electron states
			lidos_b = [loc_int_dos_by_band(data, ee, broadening = None, band = b) for b in bidx if b > 0]
			if any(x is None for x in lidos_b):
				return None
			lidos = np.sum(lidos_b, axis = 0)  # sum over bands
		else:
			lidos = loc_int_dos_by_band(data, ee, broadening = None)
			if lidos is None:
				return None

		## Integrate over space
		idos = np.dot(da, lidos)
		if "verbose" in sys.argv:
			print("int_dos_by_band")
			print(da.shape, lidos.shape, "-->", idos.shape)

		## Apply broadening
		if 'verbose' in sys.argv:
			print("int_dos_by_band: call idos_broadening", broadening)
		idos = idos_broadening(idos, ee, broadening = broadening)

	return idos
+
def int_dos_by_band_ll(
		data: DiagData, ee, broadening = None, return_dict = False, radial = True,
		psi2z: dict = None, electrons = False, holes = False, same_weights = True,
		assume_sorted_aligned = False):
	"""Integrated density of states by band but for LLs. (most input parameters are not
	used, but still kept for potential future compatibility to int_dos_by_band().)

	Like int_dos(), but do not sum over the bands.

	Additional arguments:
	psi2z			Dict of 2d-arrays with dimensions [nk, nz] with band indices
	                as keys. The values are the absolute value squared of the
	                wave functions, not integrated over z. Used for calculation
	                of densityz (see corresponding function in density.py).
	same_weights	Boolean. If True (default), the weights when interpolating
					psi2z are identical and the same mean value is applied to
					all da-triangles. If False, for each da-triangle psi2z is
					interpolated individually.
	assume_sorted_aligned  Assume that all datapoints in data have the same, sorted bindex.

	Returns:
	idos    Numpy array of shape (nB, nz, ne) with the integrated density of
	        states per magnetic-field value, z coordinate, and energy; or None
	        if band indices are not available.
	"""

	if not electrons and not holes:
		raise ValueError("The arguments electrons and holes may not be both False")

	if 'verbose' in sys.argv:
		print('int_dos_by_band_ll: broadening', broadening)

	## Use the local integrated density of states (see above) and integrate over space
	## Apply broadening later, i.e., over the integrated array, because it involves the
	## time-consuming operation of convolution.
	## Therefore we use broadening = None at this stage
	bidx = data.get_all_bindex()
	if bidx is None:
		sys.stderr.write("Warning (int_dos_by_band_ll): Band indices are required but not present.\n")
		return None

	ne = len(ee)
	bval = data.get_paramval()
	# Extract the magnetic-field z components from whichever container
	# get_paramval() returned (VectorGrid, list of Vectors, or list of numbers).
	if isinstance(bval, VectorGrid):
		bzval = bval.get_values('bz')
	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], Vector):
		bzval = [b.z() for b in bval]
	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], (float, np.floating, int, np.integer)):
		bzval = bval
	else:
		raise TypeError("Invalid values for bval")
	nB = len(bzval)
	ll_inv_area = np.abs(bzval) * eoverhbar / (2 * np.pi)  # LL-degeneracy per area

	if psi2z is not None:
		nz = psi2z[bidx[0]].shape[1]

		idos = np.zeros((nB, nz, ne))
		for b in bidx:
			if isinstance(b, tuple):
				# axial approximation
				bandindex = b[-1]
			else:
				# noax
				bandindex = b
			if not electrons and bandindex > 0:
				continue
			if not holes and bandindex < 0:
				continue

			# The following is supposed to function similar to loc_int_dos_by_band(),
			# but simplified since we don't need to integrate over k.
			# Thus, only count states (no overlaps!) and apply LL degeneracy at the end.
			if assume_sorted_aligned:
				# assumes bidx is sorted and identical for all data points -- see argument doc
				b_array_index = np.searchsorted(bidx, b)
				ei = np.array([d.eival[b_array_index] for d in data])
			else:
				_, ei = data.get_plot_coord(b, "index")
			if np.all(np.isnan(ei)):
				sys.stderr.write(
					"Warning (int_dos_by_band_ll): No data for band %s.\n" % b)
				continue

			# Get IDOS element
			# This is of type boolean, which will be interpreted as integer in an arithmetic operation
			lidos_b = ee[np.newaxis, :] > ei[:, np.newaxis]
			if bandindex < 0:  # hole band
				lidos_b = lidos_b - 1  # -= will not work here

			idos += psi2z[b][:, :, np.newaxis] * lidos_b[:, np.newaxis, :]

		# Apply broadening
		if 'verbose' in sys.argv:
			print("int_dos_by_band_ll: call idos_broadening", broadening)
		idos = idos_broadening(idos, ee, broadening = broadening, idos_xdim = 1)

	else:
		raise NotImplementedError()

	# Apply LL degeneracy
	idos = idos * ll_inv_area[:, np.newaxis, np.newaxis]

	return idos
+
+### INTEGRATED OBSERVABLES ###
+
def int_obsval(data, obs, ee, da, electrons = False, holes = False, local = False, sel_bindex = None):
	"""Integrated observable, basic function used by integrated_observable().

	Integrate over the k or B values only if local is False.

	Arguments:
	data        DiagData instance
	obs         String or integer. The observable id.
	ee          Numpy array. The energy values at which to calculate.
	da          Float or numpy array. The area/volume element. If an array, it
	            must contain one element per data point.
	electrons   True or False. Whether to include electrons.
	holes       True or False. Whether to include holes.
	local       True or False. If True, return the local integrated observables
	            (no integration over k or B). If False, integrate over k or B.
	sel_bindex  List/array or None. If not None, consider the bands with these
	            indices only. Note that the restriction to electrons or holes is
	            applied as well.

	Returns:
	int_o       Numpy array of dimension 2 (if local is True) or 1 (if local is
	            False). The (local) integrated observable.
	"""
	if not electrons and not holes:
		raise ValueError("The arguments electrons and holes may not be both False")
	int_o = np.zeros((len(data), len(ee)), dtype = float)
	for j, d in enumerate(data):
		regular = ~np.isnan(d.eival)   # We will discard NaN values
		sel_h = (np.asarray(d.bindex) < 0)
		sel_e = (np.asarray(d.bindex) > 0)
		if sel_bindex is not None:  # Restrict to sel_bindex if requested
			sel_b = np.isin(d.bindex, sel_bindex)
			sel_h = sel_h & sel_b
			sel_e = sel_e & sel_b
		oval = np.real(d.get_observable(obs))
		# A scalar da applies to every data point; an array is indexed per point
		this_da = da if isinstance(da, (float, np.floating)) else da[j]
		if holes:
			# Broadcast arrays
			ei1, ee1 = np.meshgrid(d.eival[sel_h & regular], ee, sparse = True)
			# Calculate occupation; the -1.0 offset makes hole occupancy 0
			# below and -1 above the state energy
			all_occ = n_step(ei1, ee1) - 1.0
			# Perform integration. The integrand is nFermi(E, E_i) obsval(E_i).
			# The volume element is dA. Broadening is applied later.
			int_o[j, :] += this_da * (np.sum(all_occ * oval[sel_h & regular][np.newaxis, :], axis = 1))
		if electrons:
			# Repeat analogous steps for electrons
			ei1, ee1 = np.meshgrid(d.eival[sel_e & regular], ee, sparse = True)
			all_occ = n_step(ei1, ee1)
			int_o[j, :] += this_da * (np.sum(all_occ * oval[sel_e & regular][np.newaxis, :], axis = 1))
	return int_o if local else np.sum(int_o, axis = 0)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/broadening.py b/kdotpy-v1.0.0/src/kdotpy/density/broadening.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9ded4f4c32de61352edd5c78d569e084e7072e0
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/broadening.py
@@ -0,0 +1,873 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+from scipy.special import erfc
+from ..config import get_config_int
+from ..momentum import VectorGrid
+from ..physconst import kB
+
+### BROADENING FUNCTIONS ###
+# These functions define the 'convolution kernels' for applying broadening to
+# an (integrated) density of states. They are defined such that they can take
+# array arguments. With numpy broadcasting, it is then easy to do an 'iteration'
+# over argument values. Example, assuming 1D arrays ee and widths:
+# f = n_gaussian(ee[np.newaxis, :], ef, widths[:, np.newaxis])
+
def n_step(e, ef):
	"""Step-function occupancy: 1 for e < ef, 0 for e > ef, 1/2 at e == ef.

	This is the zero-width limit of the broadening functions defined below.
	"""
	return (np.sign(ef - e) + 1.0) * 0.5
# Note: This is equivalent to np.heaviside(ef - e, 0.5). However, since
# np.heaviside() is defined for numpy version >= 1.13.0 only, we stick to the
# more compatible formulation in terms of np.sign().
+
def n_thermal(e, ef, tt):
	"""Thermal broadening (Fermi function; width parameter is temperature)

	The zero-temperature limit is the step function n_step().
	"""
	if tt is None or np.amin(tt) < 0:
		raise ValueError("Temperature argument tt should have numerical values >= 0")

	# Evaluate (e - ef) / (kB T) only where T > 0; the T = 0 entries fall
	# back to the step function via np.where below.
	nonzero = tt > 0
	x = np.divide(e - ef, kB * tt, where=nonzero)
	fermi = 0.5 * (np.tanh(-0.5 * x) + 1.0)
	return np.where(nonzero, fermi, n_step(e, ef))
+
def n_fermi(e, ef, tau):
	"""Fermi broadening (width parameter is energy tau = kB * T)

	The zero-width limit is the step function n_step().
	"""
	if tau is None or np.amin(tau) < 0:
		raise ValueError("Broadening width argument tau should have numerical values >= 0")

	# Evaluate (e - ef) / tau only where tau > 0; the tau = 0 entries fall
	# back to the step function via np.where below.
	nonzero = tau > 0
	x = np.divide(e - ef, tau, where=nonzero)
	fermi = 0.5 * (np.tanh(-0.5 * x) + 1.0)
	return np.where(nonzero, fermi, n_step(e, ef))
+
def n_gaussian(e, ef, sigma):
	"""Gaussian broadening (occupancy function for Gaussian with width sigma)

	The zero-width limit is the step function n_step().
	"""
	if sigma is None or np.amin(sigma) < 0:
		raise ValueError("Broadening width argument sigma should have numerical values >= 0")

	# Evaluate (e - ef) / (sigma sqrt(2)) only where sigma > 0; the sigma = 0
	# entries fall back to the step function via np.where below.
	nonzero = sigma > 0
	x_s2 = np.divide(e - ef, sigma * np.sqrt(2), where=nonzero)
	return np.where(nonzero, 0.5 * erfc(x_s2), n_step(e, ef))
+
def n_lorentzian(e, ef, gamma):
	"""Lorentzian broadening (occupancy function for Lorentzian with FWHM 2gamma)

	The zero-width limit is the step function n_step().
	"""
	if gamma is None or np.amin(gamma) < 0:
		raise ValueError("Broadening width argument gamma should have numerical values >= 0")

	# Evaluate (e - ef) / gamma only where gamma > 0; the gamma = 0 entries
	# fall back to the step function via np.where below.
	nonzero = gamma > 0
	x = np.divide(e - ef, gamma, where=nonzero)
	return np.where(nonzero, 0.5 + np.arctan(-x) / np.pi, n_step(e, ef))
+
+### BROADENING TOOLS ###
+
# Module-level flags used by opts_to_broadening() so that each of its one-time
# warnings is written to stderr at most once per program run.
broadening_warning_displayed = False
berry_broadening_warning_displayed = False
+
def opts_to_brf(btype, bscale, ll = False, default = None):
	"""Construct a single BroadeningFunction from a type/width pair.

	Arguments:
	btype    String or None. The broadening type; 'auto'/'automatic' is
	         resolved to 'gauss' (LL mode) or 'thermal'.
	bscale   Number or None. The broadening width.
	ll       True or False. Whether we are in LL mode (affects 'auto').
	default  None, 2-tuple (btype, width) or dict {btype: width, ...}. Fallback
	         used when btype or bscale is missing; see opts_to_broadening().

	Returns:
	A BroadeningFunction instance, or None if no valid combination of type,
	width, and default is available.
	"""
	if btype in ['auto', 'automatic']:
		btype = 'gauss' if ll else 'thermal'
	if btype is not None:
		if bscale is not None:
			return BroadeningFunction(btype, bscale)
		if isinstance(default, dict) and btype in default:
			# Width missing: take the default width for this type
			return BroadeningFunction(btype, default[btype])
		return None
	if isinstance(default, tuple) and len(default) == 2:
		# Type missing: fall back to the default (btype, width) pair
		return BroadeningFunction(*default)
	return None
+
def opts_to_broadening(opts, berry = False, verbose = False, ll = False, default = None):
	"""Get BroadeningFunction instance(s) from options (input arguments)

	Arguments:
	opts     Dict instance. The options dictionary that it obtained from command
	         line arguments.
	berry    True or False. If True, return two BroadeningFunction instances,
	         one for density of states, one for (integrated) Berry curvature
	         (or Chern/Hall). If False, return only the instance for density of
	         states.
	verbose  True or False. If True, print extra diagnostic information.
	ll       True or False. Determines whether we are in the LL mode or not.
	         This determines the interpretation of 'auto' ('automatic')
	         broadening type.
	default  None or 2-tuple, or dict. If a tuple, it should of the form (btype,
	         width). It is passed as argument to BroadeningFunction if opts does
	         not contain a broadening option. If a dict, it should be of the
	         form {btype: width, ...}. If opts contains a broadening type
	         without width, use the value corresponding to that type from the
	         dict.

	Returns:
	broadening        BroadeningFunction. Broadening function for DOS.
	berry_broadening  BroadeningFunction. Broadening function for Berry/Chern/
	                  Hall. Only if argument berry is True.
	"""
	# These module-level flags ensure each warning below is shown only once
	global broadening_warning_displayed
	global berry_broadening_warning_displayed
	if verbose:
		print('opts_to_broadening:')
		for x in opts:
			if 'broadening' in x:
				print('%s: %s' % (x, opts[x]))
	if not isinstance(opts, dict):
		raise TypeError("Argument opts must be a dict instance")
	btype = opts.get('broadening_type')
	bscale = opts.get('broadening_scale')
	if isinstance(btype, list) and isinstance(bscale, list):
		# Lists of types and widths: combine them into a MultiBroadening
		if len(btype) != len(bscale):
			raise ValueError("Options broadening_type and broadening_scale must have same length")
		brfs = []
		for t, s in zip(btype, bscale):
			brf = opts_to_brf(t, s, ll = ll, default = default)
			if brf is not None:
				brfs.append(brf)
		# NOTE(review): if no valid pairs remain, MultiBroadening() is called
		# without arguments, which raises ValueError -- confirm this is intended.
		broadening = MultiBroadening(*brfs)
		if all(brf.btype not in ['thermal', 'fermi'] for brf in broadening):
			if not broadening_warning_displayed:
				broadening_warning_displayed = True
				sys.stderr.write("Warning (opts_to_broadening): Thermal broadening is neglected unless set explicitly.\n")
	else:
		# Single type/width pair (either may be None; see opts_to_brf)
		broadening = opts_to_brf(btype, bscale, ll = ll, default = default)
		if broadening is None or (isinstance(broadening, BroadeningFunction) and broadening.btype not in ['thermal', 'fermi']):
			if not broadening_warning_displayed:
				broadening_warning_displayed = True
				sys.stderr.write("Warning (opts_to_broadening): Thermal broadening is neglected unless set explicitly.\n")
	if berry:
		# Determine a separate broadening function for Berry/Chern/Hall
		btype_berry = opts.get('berrybroadening_type')
		bscale_berry = opts.get('berrybroadening_scale')
		if btype_berry in ['auto', 'automatic']:
			btype_berry = 'gauss' if ll else 'thermal'
		if btype_berry is not None and bscale_berry is not None:
			berry_broadening = BroadeningFunction(btype_berry, bscale_berry)
		elif btype_berry is None or (btype_berry == btype and bscale_berry is None):
			# Fall back to (a copy of) the DOS broadening
			berry_broadening = None if broadening is None else broadening.copy()
		else:
			berry_broadening = None
		if 'berrybroadening_type' not in opts:
			if not berry_broadening_warning_displayed:
				berry_broadening_warning_displayed = True
				sys.stderr.write("Warning (opts_to_broadening): Landau plateaus will be visible only if the broadening of the Berry curvature is set separately. For example, use 'broadening 1.0 10%'.\n")
		if verbose:
			print('opts_to_broadening:', broadening, berry_broadening)
			if isinstance(broadening, BroadeningFunction):
				broadening.print_verbose()
		return broadening, berry_broadening
	else:
		if verbose:
			print('opts_to_broadening:', broadening)
			if isinstance(broadening, BroadeningFunction):
				broadening.print_verbose()
		return broadening
+
def idos_broadening(idos, ee, broadening = None, **kwds):
	"""Apply broadening (compatibility wrapper for BroadeningFunction and None input)

	Arguments:
	idos        Array. The (integrated) density of states.
	ee          Array. Energy values corresponding to the last axis of idos.
	broadening  None, BroadeningFunction, or MultiBroadening instance.
	**kwds      Passed through to apply_idos().

	Returns:
	The broadened IDOS array; idos itself if there is nothing to do.
	"""
	if broadening is None:
		return idos
	if isinstance(broadening, BroadeningFunction):
		if broadening.btype == 'step':
			return idos  # do not do anything
		if 'verbose' in sys.argv:
			broadening.print_verbose()
		return broadening.apply_idos(idos, ee, **kwds)
	if isinstance(broadening, MultiBroadening):
		if 'verbose' in sys.argv:
			broadening.print_verbose()
		return broadening.apply_idos(idos, ee, **kwds)
	raise TypeError("Invalid broadening input")
+
def idos_convolve(ee_sub, ee_ext, idos_ext, df_occ):
	"""Resample the IDOS onto ee_sub and convolve it with a broadening kernel.

	Arguments:
	ee_sub     Array of 1 dim. Energy values corresponding to the output.
	ee_ext     Array of 1 dim. Energy values of the input, corresponding to
	           idos_ext.
	idos_ext   Array of 1 dim. The integrated density of states values.
	df_occ     Array of 1 dim. The derivative of the occupancy function, which
	           acts as the convolution kernel.
	"""
	resampled = np.interp(ee_sub, ee_ext, idos_ext)
	return np.convolve(resampled, df_occ, mode='full')
+
def iter_idos_dfocc(idos, df_occ):
	"""Yield pairs of 1-dim IDOS slices and their matching broadening kernels.

	Iterates over all but the last axes of idos; the kernel df_occ is either
	shared by all slices (1-dim) or indexed along its leading axes.
	"""
	ne_i = idos.shape[-1]
	ne_d = df_occ.shape[-1]
	if df_occ.ndim == 1:
		# Single kernel: pair it with every row of the flattened IDOS
		for row in idos.reshape(-1, ne_i):
			yield row, df_occ
	else:
		# One kernel per leading index; idos may carry extra middle axes
		n_x = df_occ.size // ne_d
		kernels = df_occ.reshape(n_x, ne_d)
		blocks = idos.reshape(n_x, -1, ne_i)
		for block, kernel in zip(blocks, kernels):
			for row in block:
				yield row, kernel
	return
+
+### BROADENING CLASS ###
+
class BroadeningFunction:
	"""Container class for broadening functions.
	This class implements the convolution operation to apply to the (integrated)
	density of states.

	Attributes:
	btype          String. Broadening type, that determines the shape of the
	               broadening kernel.
	width          Float or array. The width parameter. Either a constant
	               (single value) or a dependence (array).
	nominal_width  Float or None. The nominal width parameter is the
	               characteristic width, e.g., the broadening width for a
	               magnetic field of 1 T. It is None if the instance is
	               initiated with an array as width input.
	eres_test_warning_issued    True or False. This is used by the member
	                            function eres_test() to make sure the warning is
	                            not repeated.
	"""
	def __init__(self, btype, width, width_dependence = None):
		# NOTE(review): width_dependence is accepted but never used in this
		# method; presumably reserved for future use -- confirm before removal.
		if not isinstance(btype, str):
			# NOTE(review): TypeError raised without a message; consider adding one.
			raise TypeError
		btype = btype.lower()
		# Normalize synonymous type labels to a canonical btype string
		if btype in ['fermi', 'logistic', 'sech']:
			self.btype = 'fermi'
		elif btype in ['thermal']:
			self.btype = 'thermal'
		elif btype in ['gauss', 'gaussian', 'normal']:
			self.btype = 'gauss'
		elif btype in ['lorentz', 'lorentzian']:
			self.btype = 'lorentz'
		elif btype in ['step', 'delta']:
			self.btype = 'step'
		else:
			raise ValueError("Invalid value for broadening type")
		if isinstance(width, np.ndarray):
			if (not np.issubdtype(width.dtype, np.floating)) and not (np.issubdtype(width.dtype, np.integer)):
				raise ValueError("Argument width must be numeric or an array of numeric type")
			if width.ndim >= 2:  # We may perhaps relax this condition later
				raise ValueError("Argument width must not be an array of dimension >= 2.")
			if np.amin(width) < 0.0:
				raise ValueError("Argument width array should only contain values >= 0.")
			self.width = width
			# An array width has no single characteristic value
			self.nominal_width = None
		elif isinstance(width, (float, np.floating, int, np.integer)):
			if width < 0.0:
				raise ValueError("Argument width should be >= 0.")
			self.width = width
			self.nominal_width = width
		else:
			# NOTE(review): TypeError raised without a message; consider adding one.
			raise TypeError
		self.eres_test_warning_issued = False

	def copy(self):
		"""Return a new instance with the same parameters"""
		new_instance = BroadeningFunction(self.btype, self.width)
		new_instance.nominal_width = self.nominal_width
		new_instance.eres_test_warning_issued = self.eres_test_warning_issued
		return new_instance

	def __repr__(self):
		"""Short string representation, e.g., <Broadening 'gauss' 1 (5,)>"""
		width_str = "" if self.nominal_width is None else (" %g" % self.nominal_width)
		shape_str = " %s" % (self.width.shape,) if isinstance(self.width, np.ndarray) else ""
		return "<Broadening '%s'%s%s>" % (self.btype, width_str, shape_str)

	def get_conv_width(self, maximum = True):
		"""Determine width of the convolution window

		Argument:
		maximum     True or False. If True, return the value corresponding to
		            the largest width. Otherwise, return an array.

		Returns:
		conv_width  Array or float.
		"""
		# cw denotes the width for the 'standard' occupation function, i.e.,
		# with width parameter set to 1
		if self.btype == 'fermi':
			cw = -np.log(1e-15)  # ~ 34.5
		elif self.btype == 'thermal':
			cw = -np.log(1e-15) * kB  # ~ 34.5 * kB
		elif self.btype == 'gauss':
			cw = np.sqrt(-2 * np.log(1e-15))  # ~ 8.3
		elif self.btype == 'lorentz':
			cw = 10.0
			# A rather arbitrary choice; the asymptotic decrease of the
			# occupation function for large x is "too slow"
		elif self.btype == 'step':
			cw = 0.0
		else:
			raise ValueError("Invalid value for self.btype")
		return cw * np.amax(self.width) if maximum else cw * self.width

	def eres_test(self, *args):
		"""Issue a warning if the resolution is smaller than the broadening.

		Arguments:
		*args   Either one array of energy values (the resolution is taken as
		        the average spacing), one number (the resolution itself), or
		        three values of which the last is the resolution.

		Returns:
		True if the smallest nonzero broadening width is at least as large as
		the energy resolution, otherwise False.
		"""
		if len(args) == 1 and isinstance(args[0], np.ndarray):
			if len(args[0]) < 2:
				return False
			eres = (np.amax(args[0]) - np.amin(args[0])) / (len(args[0]) - 1)
		elif len(args) == 1:
			eres = args[0]
		elif len(args) == 3:
			eres = args[-1]
		else:
			raise ValueError("Invalid argument")
		# w0 denotes the nominal width for the 'standard' occupation function,
		# i.e., with width parameter set to 1
		w0 = kB if self.btype == 'thermal' else 0 if self.btype == 'step' else 1
		if isinstance(self.width, np.ndarray):
			# Consider the smallest nonzero width in the array
			widths = self.width[self.width > 0]
			width = 0 if widths.size == 0 else w0 * np.amin(widths)
		else:
			width = w0 * self.width
		if width < eres:
			if not self.eres_test_warning_issued:
				sys.stderr.write("Warning (eres_test): The broadening is smaller than the resolution for small fields. If you encounter artifacts in the density of states (e.g., a fine structure of many narrow peaks), choose a larger broadening.\n")
				self.eres_test_warning_issued = True
			return False
		return True

	def occupation(self, ee, index = None):
		"""Occupation function as function of energy, centered at zero.

		Arguments:
		ee      Float or numpy array. Calculate the occupation function with
		        respect to this energy or these energies, where the Fermi level
		        is assumed to be at 0.
		index   Integer or tuple. Take the width parameter at this position in
		        the width array. If width is a single number, ignore this
		        argument.

		Returns:
		occ     Float or numpy array, depending on whether argument ee and the
		        width parameters are numbers of arrays.
		"""
		# Select the width parameter and align shapes for broadcasting
		if index is not None and isinstance(self.width, np.ndarray) and self.width.ndim == 1:
			ee1 = ee
			w1 = self.width[index]
		elif isinstance(ee, np.ndarray) and isinstance(self.width, np.ndarray):  # prepare input arrays for broadcasting
			ee1 = ee[np.newaxis, :]
			w1 = self.width[:, np.newaxis]
		else:
			ee1 = ee
			w1 = self.width
		if self.btype == 'fermi':
			occ = n_fermi(ee1, 0, w1)  # Width parameter is energy, equivalent to kB T
		elif self.btype == 'thermal':
			occ = n_thermal(ee1, 0, w1)  # Width parameter is temperature T
		elif self.btype == 'gauss':
			occ = n_gaussian(ee1, 0, w1)
		elif self.btype == 'lorentz':
			occ = n_lorentzian(ee1, 0, w1)
		elif self.btype == 'step':
			# The width parameter is ignored, but if it is an array, set the
			# shape of the output array accordingly.
			# (The conditional expression covers the full right-hand side.)
			occ = n_step(ee1, 0) + np.zeros_like(w1) if isinstance(w1, np.ndarray) else n_step(ee1, 0)
		else:
			raise ValueError("Invalid value for self.btype")
		# Unwrap 0-dim arrays into plain scalars
		return occ.item() if isinstance(occ, np.ndarray) and occ.ndim == 0 else occ

	def diff_occupation(self, ee, index = None):
		"""Gradient of the occupation function

		Arguments: Same as occupation().

		Returns:
		Numpy array. The sign-flipped discrete gradient of the occupation
		function along the energy axis; this serves as a broadening kernel.
		"""
		f_occ = self.occupation(ee, index = index)
		return -np.gradient(f_occ, axis = -1)

	def apply_width(self, multipliers, in_place = False):
		"""Apply multipliers to (re)define width parameter

		This function uses the nominal width defined as a member variable.

		Arguments:
		multipliers   List or array.
		in_place      True or False. If True, update the present instance. If
		              False, return a new instance.
		"""
		multipliers = np.asarray(multipliers)
		if self.nominal_width is None:
			raise ValueError("Cannot apply width multipliers, because nominal_width is not set")
		new_width = self.nominal_width * multipliers
		if in_place:
			self.width = new_width
			return self
		else:
			new_brf = BroadeningFunction(self.btype, new_width)
			new_brf.nominal_width = self.nominal_width
			return new_brf

	def apply_width_dependence(self, values, function, in_place = False):
		"""Set width dependence depending on input argument function
		This function calculates the multipliers by which nominal_width is
		multiplied.

		Arguments:
		values     Array. The array to which the function is applied.
		function   One of the following:
		           None. Width is set to nominal_width.
		           Callable. Call function(values).
		           Number. Interpret as exponent e, i.e., mult = values ** e
		           String. One of 'auto', 'automatic', 'const', 'sqrt', 'cbrt',
		           'lin', 'linear'. Apply specified function to values. See
		           listing in README.
		in_place   True or False. If True, update the present instance. If
		           False, return a new instance.
		"""
		values = np.asarray(values)
		if function is None:
			mult = 1
		elif callable(function):
			mult = function(values)
		elif isinstance(function, (float, np.floating, int, np.integer)):
			mult = np.power(values, function)
		elif isinstance(function, str):
			function = function.lower()
			if function in ['auto', 'automatic']:
				# Automatic choice: sqrt dependence for Gaussian broadening
				mult = np.sqrt(values) if self.btype == 'gauss' else 1
			elif function == 'sqrt':
				mult = np.sqrt(values)
			elif function == 'cbrt':
				mult = np.cbrt(values)
			elif function in ['lin', 'linear']:
				mult = values
			elif function == 'const':
				mult = 1
			else:
				sys.stderr.write("ERROR (BroadeningFunction.apply_width_dependence): Invalid value for argument function.\n")
				exit(1)
		else:
			sys.stderr.write("ERROR (BroadeningFunction.apply_width_dependence): Invalid value for argument function.\n")
			exit(1)
		return self.apply_width(mult, in_place = in_place)

	def interpolate_width(self, n_target, in_place = False):
		"""Interpolate the width parameter array to the specified size.

		Arguments:
		n_target      Integer >= 2. The target length, where start and end point
		              are included.
		in_place      True or False. If True, update the present instance. If
		              False, return a new instance.
		"""
		if not isinstance(n_target, (int, np.integer)):
			raise TypeError("Argument n_target must be an integer >= 2")
		if n_target < 2:
			raise ValueError("Argument n_target must be an integer >= 2")
		# A scalar or single-element width needs no interpolation
		if not isinstance(self.width, np.ndarray):
			return self if in_place else self.copy()
		if self.width.size == 1:
			return self if in_place else self.copy()
		if self.width.ndim >= 2:
			raise ValueError("Cannot apply interpolation if the width parameter is not a 1-dim array.")
		if self.width.size == n_target:
			return self if in_place else self.copy()
		n_source = self.width.size
		# TODO: Not fully appropriate for interpolation of quadratically spaced grids
		new_width = np.interp(np.linspace(0, n_source - 1, n_target), np.linspace(0, n_source - 1, n_source), self.width)
		if in_place:
			self.width = new_width
			return self
		else:
			new_brf = BroadeningFunction(self.btype, new_width)
			new_brf.nominal_width = self.nominal_width
			return new_brf

	def apply_idos(self, idos, ee, subdivide=True, idos_xdim=None, idos_broadcast=False):
		"""Apply broadening to integrated density of states using convolution.

		Arguments:
		idos            Numpy array. The integrated density of states that is
		                broadened.
		ee              Numpy array. The energies corresponding to the last
		                dimension of the idos array.
		subdivide       True or False. Whether the energy range can be
		                subdivided to the minimum number of values specified by
		                configuration option 'dos_convolution_points'.
		idos_xdim       Integer or None. If an integer, the number of dimensions
		                (axes) that refer to x (either k or B). If None, assume
		                idos_xdim = idos.ndim - 1.
		idos_broadcast  True or False. Whether to allow broadcasting a 1-dim
		                idos array to a BroadeningFunction with multiple widths.
		                If False (default), raise an exception if idos is a
		                1-dim array and width is not a single number.

		Note:
		The shapes of the arrays must satisfy the following conditions.
		If ee.shape = (n_e,) and width.shape = (n_w,), and d = idos_xdim, then
		idos must have shape (n_i1, ..., n_id, n_j1, ... n_jm, n_e) where
		n_i1 * ... * n_id = n_w, i.e., the first d axes must match the number
		of entries in width. There may be an arbitrary number m of intermediate
		axes, including m = 0, such that idos.shape = (n_i1, ..., n_id, n_e).
		If idos_broadcast is set to true and idos is a one dimensional array,
		then broadcast idos to (n_w, n_e). If the width is a single number,
		the same broadening is applies by iterating over all axes in idos except
		the last one.

		Returns:
		bidos      Numpy array of the same shape as the argument idos (unless
		           idos_broadcast is True; see Note above). The broadened IDOS
		           values.
		"""
		idos = np.asarray(idos)
		if idos.ndim < 1:
			raise ValueError("Argument idos must be an array-like object of dimension at least 1.")
		ee = np.asarray(ee)
		min_points = get_config_int('dos_convolution_points', minval = 10)

		# Dimensionality of x coordinates (k, B) in idos
		if idos_xdim is None:
			idos_xdim = idos.ndim - 1
		if idos_xdim > idos.ndim - 1:
			raise ValueError("Number of x dimensions must not exceed idos.ndim - 1.")
		if idos_xdim < 0:
			raise ValueError("Number of x dimensions must be >= 0.")

		# Size checks
		if idos.shape[-1] != ee.shape[-1]:
			raise ValueError("Sizes of IDOS array and energy value array do not match")
		nx_width = self.width.size if isinstance(self.width, np.ndarray) else 1
		nx_idos = np.prod(idos.shape[:idos_xdim], dtype=int)
		if nx_idos == 1 and nx_width > 1:
			if idos_broadcast:
				idos = np.broadcast_to(idos, self.width.shape + (ee.shape[-1],))
			else:
				raise ValueError("By default, 1-dim array idos is not broadcast to match array self.width. Set optional argument idos_broadcast=True to allow broadcast.")
		elif nx_width > 1 and nx_idos != nx_width:
			raise ValueError("Sizes of IDOS array and broadening widths do not match, %s (#=%i) vs %s (#=%i)" % (idos.shape, nx_idos, self.width.shape, nx_width))
		elif isinstance(self.width, np.ndarray) and nx_width == nx_idos and idos.shape[:idos_xdim] != self.width.shape:
			sys.stderr.write("Warning (BroadeningFunction.apply_idos): Sizes of IDOS array and broadening widths match (%i), but shapes are different, %s vs %s.\n" % (nx_idos, idos.shape, self.width.shape))

		# Determine width of convolution 'window'
		de = (ee[-1] - ee[0]) / (len(ee) - 1)
		# TODO: Check if energy array is equidistantly spaced
		conv_width = self.get_conv_width()
		n_ext = int(np.ceil(conv_width / de) + 1)

		# Extend and subdivide energy array (to provide sufficient resolution)
		left = np.linspace(ee[0] - n_ext * de, ee[0] - de, n_ext)
		right = np.linspace(ee[-1] + de, ee[-1] + n_ext * de, n_ext)
		ee_ext = np.concatenate((left, ee, right))
		subdiv = int(np.ceil(min_points / (len(ee_ext) - 1))) if subdivide else 1
		if subdiv > 1:
			# Insert subdiv - 1 equally spaced values between adjacent energies
			subdiv_de = np.linspace(0, de, subdiv + 1)[:-1]
			ee_sub = (subdiv_de[np.newaxis, :] + ee_ext[:, np.newaxis]).flatten()[:-(subdiv - 1)]
		else:
			ee_sub = ee_ext

		# Get occupancy function
		ee_occ = np.linspace(-n_ext * de, n_ext * de, 2 * n_ext * subdiv + 1)
		f_occ = self.occupation(ee_occ)
		df_occ = -np.gradient(f_occ, axis = -1)

		# Extend idos array (repeat the edge values to pad the window)
		left = np.repeat(idos[..., 0:1], n_ext).reshape(idos.shape[:-1] + (n_ext,))
		right = np.repeat(idos[..., -1:], n_ext).reshape(idos.shape[:-1] + (n_ext,))
		idos_ext = np.concatenate((left, idos, right), axis = -1)

		# Do the convolution, iterating over all but the last axes
		# The result is a two-dimensional array, i.e., with all but the last
		# axis being flattened. The result will be reshaped at the end.
		idos_conv = np.array(
			[idos_convolve(ee_sub, ee_ext, i, d) for i, d in iter_idos_dfocc(idos_ext, df_occ)]
		)

		# Extract the values at the specified energies
		# Calculate convolution and corresponding energies
		ee_conv = np.linspace(ee_sub[0] - ee_occ[-1], ee_sub[-1] - ee_occ[0], idos_conv.shape[-1])
		if idos_conv.ndim == 1:
			bidos = np.interp(ee, ee_conv, idos_conv)
		else:
			bidos = np.array([np.interp(ee, ee_conv, i) for i in idos_conv])
			bidos = bidos.reshape(idos.shape)

		return bidos

	def print_verbose(self):
		"""Verbose / debug output"""
		print("BroadeningFunction attributes:")
		all_att = [att for att in dir(self) if not att.startswith('__')]
		for att in all_att:
			val = getattr(self, att)
			if not callable(val):
				print("", att, type(val), val if isinstance(val, str) else str(val) if isinstance(val, (bool, int, float)) else val.shape if isinstance(val, (np.ndarray, VectorGrid)) else len(val) if isinstance(val, (list, tuple)) else '')
+
class MultiBroadening:
	"""Combination of several BroadeningFunction instances.

	The total broadening is the iterative convolution of the individual
	broadening functions. Instances can be constructed either from one or more
	existing BroadeningFunction instances, or from parallel sequences of
	broadening types and widths (and optionally width dependences) of equal
	length, from which the BroadeningFunction members are created.
	"""
	def __init__(self, *args):
		if len(args) == 0:
			raise ValueError("MultiBroadening.__init__ called without arguments")
		if all(isinstance(arg, BroadeningFunction) for arg in args):
			self.brfs = list(args)
		elif len(args) in [2, 3]:  # btypes, widths, width_dependences = None
			btypes = args[0]
			widths = args[1]
			width_dependences = args[2] if len(args) > 2 else None
			# Bug fix: use 'not (A and B)'. The previous form 'not A and B'
			# silently accepted empty sequences and raised an uncontrolled
			# TypeError (from len()) for non-sequence input.
			if not (isinstance(btypes, (list, tuple, np.ndarray)) and len(btypes) > 0):
				raise TypeError("Argument btypes must be a non-empty list, tuple, or array")
			if not (isinstance(widths, (list, tuple, np.ndarray)) and len(widths) > 0):
				raise TypeError("Argument widths must be a non-empty list, tuple, or array")
			if len(widths) != len(btypes):
				raise ValueError("Arguments must be lists, tuples, or arrays of equal length")
			if width_dependences is None:
				self.brfs = [BroadeningFunction(btype, width) for btype, width in zip(btypes, widths)]
			elif isinstance(width_dependences, (list, tuple, np.ndarray)):
				if len(width_dependences) != len(btypes):
					raise ValueError("Arguments must be lists, tuples, or arrays of equal length")
				self.brfs = [BroadeningFunction(btype, width, wd) for btype, width, wd in zip(btypes, widths, width_dependences)]
			else:
				# Bug fix: message previously named the argument 'width_dependence'
				raise TypeError("Argument width_dependences must be a non-empty list, tuple, array or None")
		else:
			argtype = type(args[0])
			if all(type(arg) == argtype for arg in args):
				raise TypeError("Invalid type %s of input arguments" % argtype)
			else:
				raise ValueError("Invalid combination of input arguments")
		self.btype = 'multi'
		self.width = None  # unlike BroadeningFunction, there is no single width
		self.nominal_width = 1.0
		self.eres_test_warning_issued = False

	def shallow_copy(self):
		"""Return a new instance with existing BroadeningFunction instances"""
		return MultiBroadening(*self.brfs)

	def deep_copy(self):
		"""Return a new instance with newly initiated BroadeningFunction instances"""
		new_brfs = [brf.copy() for brf in self.brfs]
		return MultiBroadening(*new_brfs)

	def copy(self, deep_copy = True):
		"""Deep (default) or shallow copy"""
		return self.deep_copy() if deep_copy else self.shallow_copy()

	def __repr__(self):
		# Show all member btypes up to 3; abbreviate longer lists.
		n = len(self.brfs)
		if n == 0:
			btype_str = "none"
		elif n <= 3:
			btype_str = ", ".join([brf.btype for brf in self.brfs])
		else:
			btype_str = "%s, ..., %s" % (self.brfs[0].btype, self.brfs[-1].btype)
		return "<MultiBroadening (%s; n=%i)>" % (btype_str, n)

	def __len__(self):
		return len(self.brfs)

	def index(self, x):
		"""Return the index of BroadeningFunction x among the members"""
		return self.brfs.index(x)

	def __iter__(self):
		return iter(self.brfs)

	def __getitem__(self, i):
		return self.brfs[i]

	def get_conv_width(self, maximum = True):
		"""Determine width of the convolution window, summing over BroadeningFunction instances"""
		cws = [brf.get_conv_width(maximum = maximum) for brf in self.brfs]
		return sum(cws)

	def eres_test(self, *args):
		"""Issue a warning if the resolution is smaller than the broadening.

		Returns:
		False if the aggregate warning is (or has been) issued, True otherwise.
		"""
		n_warnings = 0
		for brf in self.brfs:
			warning_issued_cached = brf.eres_test_warning_issued
			brf.eres_test_warning_issued = True  # suppress warning
			result = brf.eres_test(*args)
			if result:  # result is True
				brf.eres_test_warning_issued = warning_issued_cached
			else:  # result is False
				n_warnings += 1
				# Warning will be issued, so brf.eres_test_warning_issued can stay True

		# NOTE(review): the aggregate warning is only issued for two or more
		# member warnings; a single member warning is swallowed completely.
		# Confirm whether 'n_warnings > 0' was intended here.
		if n_warnings > 1:
			if not self.eres_test_warning_issued:
				sys.stderr.write("Warning (eres_test): The broadening is smaller than the resolution for small fields for %i out of %i BroadeningFunctions. If you encounter artifacts in the density of states (e.g., a fine structure of many narrow peaks), choose a larger broadening.\n" % (n_warnings, len(self.brfs)))
				self.eres_test_warning_issued = True
			return False
		return True

	def occupation_function(self, ee, index = None):
		"""Get total occupation function by convolution of the separate occupation functions

		Arguments:
		ee      Numpy array (1-dim). Energy values; must contain zero.
		index   Integer or None. If not None, select this index from
		        multi-dimensional occupation results.

		Returns:
		ee_sub  Numpy array. Possibly subdivided energy values.
		occ     Numpy array. Occupation function evaluated at ee_sub.
		"""
		if not isinstance(ee, np.ndarray):
			raise TypeError("Argument ee must be a numpy array")
		if ee.ndim != 1:
			raise ValueError("Argument ee must be a one-dimensional numpy array")

		## Special case (n = 1) TODO: not needed?
		# NOTE(review): this branch returns only occ, not the (ee_sub, occ)
		# tuple returned below; MultiBroadening.occupation avoids it through
		# its own n == 1 special case. Confirm before relying on it externally.
		if len(self.brfs) == 1:
			return self.brfs[0].occupation(ee, index = index)

		## Determine subdivision of energy range; cf. BroadeningFunction.apply_idos()
		de = (ee[-1] - ee[0]) / (len(ee) - 1)
		min_points = get_config_int('dos_convolution_points', minval = 10)
		subdiv = int(np.ceil(min_points / (len(ee) - 1)))
		if subdiv > 1:
			subdiv_de = np.linspace(0, de, subdiv + 1)[:-1]
			ee_sub = (subdiv_de[np.newaxis, :] + ee[:, np.newaxis]).flatten()[:-(subdiv - 1)]
		else:
			ee_sub = ee

		## Determine index of zero energy for proper alignment after convolution
		i0 = np.argmin(np.abs(ee_sub))
		# Bug fix: compare the absolute value; the previous test
		# 'ee_sub[i0] > 1e-10' missed arrays whose value closest to zero is
		# negative (e.g. -0.5), silently skipping the required ValueError.
		if np.abs(ee_sub[i0]) > 1e-10:
			raise ValueError("Argument ee (array of energy values) must contain zero")

		## First occupation function
		occ = self.brfs[0].occupation(ee_sub)
		if occ.ndim > 1 and index is not None:
			occ = occ[index]
		## Repeatedly apply convolution with second, third ... occupation functions
		for brf in self.brfs[1:]:
			occ = brf.apply_idos(occ, ee_sub, subdivide = True, idos_broadcast = True)
			if occ.ndim > 1 and index is not None:
				occ = occ[index]
			# Correct offset due to finite integration interval by aligning occ
			# at zero energy to 0.5. This step is not needed for self.brfs[0].
			if occ.ndim == 1:
				offset = occ[i0] - 0.5
				occ -= offset
			else:
				offset = occ[..., i0] - 0.5 * np.ones(shape = occ.shape[:-1])
				occ -= offset[..., np.newaxis]
		return ee_sub, occ

	def occupation(self, ee, index = None):
		"""Get total occupation by applying occupation function"""
		## Special case (n = 1)
		if len(self.brfs) == 1:
			return self.brfs[0].occupation(ee, index = index)

		## Generic case (n >= 1): Calculate occupation function by convolution
		cw = self.get_conv_width(maximum = True)
		conv_points = get_config_int('dos_convolution_points', minval = 10)
		ee_occf = np.linspace(-cw, cw, conv_points + 1)
		ee_occf, occf = self.occupation_function(ee_occf, index = index)

		## Apply input energies using linear interpolation
		if occf.ndim > 1:
			raise NotImplementedError("Application of multidimensional result of MultiBroadening.occupation_function() not yet supported. Please iterate over index argument.")
		occ = np.interp(ee, ee_occf, occf, left = 1.0, right = 0.0)
		return occ

	def diff_occupation(self, ee, index = None):
		"""Gradient of the occupation function"""
		f_occ = self.occupation(ee, index = index)
		return -np.gradient(f_occ, axis = -1)

	def apply_width(self, multipliers, in_place = False):
		"""Apply multipliers to (re)define width parameter iteratively over BroadeningFunction instances."""
		if in_place:
			for brf in self.brfs:
				brf.apply_width(multipliers, in_place = True)
			return self
		else:
			new_brfs = [brf.apply_width(multipliers, in_place = False) for brf in self.brfs]
			new_mbr = MultiBroadening(*new_brfs)
			return new_mbr

	def apply_width_dependence(self, values, function, in_place = False):
		"""Set width dependence depending on input argument function iteratively over BroadeningFunction instances.

		Arguments:
		values     Array. The array to which the function is applied. If it is
		           a multi-dimensional array, apply values[j] to the broadening
		           functions iteratively. Otherwise, apply 'values' to each of
		           them.
		function   A function argument (None, callable, number, or string; see
		           BroadeningFunction.apply_width_dependence() and README) or a
		           list/array of these. In the latter case, apply function[j] to
		           the broadening functions iteratively. Otherwise, apply
		           'function' to each of them.
		in_place   True or False. If True, update the present instance. If
		           False, return a new instance.
		"""
		n = len(self.brfs)
		# Determine whether 'values' applies per-member (nv == n) or to all (nv == 1)
		if isinstance(values, list) or (isinstance(values, np.ndarray) and values.ndim >= 2):
			nv = len(values)
			if nv == 1:
				values = values[0]
		else:
			nv = 1
		if nv != 1 and nv != n:
			raise ValueError("Input argument values has invalid length/shape")
		# Likewise for 'function'
		if isinstance(function, (list, np.ndarray)):
			nf = len(function)
			if nf == 1:
				function = function[0]
		else:
			nf = 1
		if nf != 1 and nf != n:
			raise ValueError("Input argument function has invalid length")
		if in_place:
			for j, brf in enumerate(self.brfs):
				this_values = values if nv == 1 else values[j]
				this_function = function if nf == 1 else function[j]
				brf.apply_width_dependence(this_values, this_function, in_place = True)
			return self
		else:
			new_brfs = []
			for j, brf in enumerate(self.brfs):
				this_values = values if nv == 1 else values[j]
				this_function = function if nf == 1 else function[j]
				new_brfs.append(brf.apply_width_dependence(this_values, this_function, in_place = False))
			new_mbr = MultiBroadening(*new_brfs)
			return new_mbr

	def interpolate_width(self, n_target, in_place = False):
		"""Interpolate the width parameter array to the specified size iteratively over BroadeningFunction instances."""
		if in_place:
			for brf in self.brfs:
				brf.interpolate_width(n_target, in_place = True)
			return self
		else:
			new_brfs = [brf.interpolate_width(n_target, in_place = False) for brf in self.brfs]
			new_mbr = MultiBroadening(*new_brfs)
			return new_mbr

	def apply_idos(self, idos, ee, subdivide = True, idos_broadcast = False):
		"""Apply broadening to integrated density of states using iterative convolution."""
		for brf in self.brfs:
			idos = brf.apply_idos(idos, ee, subdivide = subdivide, idos_broadcast = idos_broadcast)
		return idos

	def print_verbose(self):
		"""Verbose / debug output"""
		print("MultiBroadening attributes:")
		all_att = [att for att in dir(self) if not att.startswith('__')]
		for att in all_att:
			val = getattr(self, att)
			if not callable(val):
				print("", att, type(val), val if isinstance(val, str) else str(val) if isinstance(val, (bool, int, float)) else val.shape if isinstance(val, (np.ndarray, VectorGrid)) else len(val) if isinstance(val, (list, tuple)) else '')
		print("Members:")
		for brf in self.brfs:
			brf.print_verbose()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/density.py b/kdotpy-v1.0.0/src/kdotpy/density/density.py
new file mode 100644
index 0000000000000000000000000000000000000000..582bde9c183e2be082971d53cb0cad5b9018cc79
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/density.py
@@ -0,0 +1,392 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from ..physconst import eoverhbar
+from ..momentum import Vector, VectorGrid
+from ..erange import get_erange
+from .broadening import BroadeningFunction, MultiBroadening
+from .base import loc_int_dos, int_dos, int_dos_by_band
+from .densitydata import DensityData, DensityDataByBand
+from .densitydata import data_interpolate_for_ldos, dos_validity_range
+
+
+### HELPER FUNCTION: FERMI ENERGY ###
+
def linzero(x1, x2, y1, y2):
	"""Get x coordinate where the line through (x1, y1) and (x2, y2) intersects y = 0.

	Assumes y1 != y2; otherwise a division by zero occurs.
	"""
	dx = x2 - x1
	dy = y2 - y1
	return x1 - y1 * dx / dy
+
def get_fermi_energy(ei_data, idos, ee, radial = True, broadening = None):
	"""Get Fermi energy at the CNP from IDOS, taking special care of an extended region with IDOS = 0

	Arguments:
	ei_data        DiagData instance. Eigenvalue (diagonalization) data.
	idos           Numpy array. The IDOS values.
	ee             Numpy array. The energy values.
	radial         True or False. If True, interpret 1D momentum values as radii
	               in polar coordinates. If False, consider data to be from a
	               one-dimensional (= cartesian) geometry.
	broadening     Broadening parameter.

	Returns:
	ef0    Float. The Fermi energy at CNP, i.e., the energy where the IDOS is 0.
	       If the method fails, return None.
	"""
	# Sanity checks: the IDOS must change sign (or touch zero) somewhere inside
	# the given energy range, otherwise the zero crossing cannot be located.
	if idos.min() * idos.max() > 0:  # if min IDOS and max IDOS have the same sign
		sys.stderr.write("Warning (get_fermi_energy): Fermi energy out of energy range.\n")
		return None
	if idos.min() == 0.0 and idos.max() == 0.0:
		sys.stderr.write("Warning (get_fermi_energy): IDOS identically zero in energy range.\n")
		return None
	if idos.min() == 0.0:
		sys.stderr.write("Warning (get_fermi_energy): Hole density (IDOS < 0) out of energy range.\n")
		return None
	if idos.max() == 0.0:
		sys.stderr.write("Warning (get_fermi_energy): Electron density (IDOS > 0) out of energy range.\n")
		return None

	# Find the indices where idos reaches 0 from below and from above
	# (assumes idos is monotonic in energy, as an IDOS should be — the counts
	# below only yield crossing indices under that assumption)
	i1 = np.count_nonzero(idos < 0.0)
	i2 = len(ee) - np.count_nonzero(idos > 0.0)
	if not (i1 > 0 and i2 < len(ee)):
		raise ValueError("Invalid value for indices i1, i2 (%i, %i); valid range is [1, %i]" % (i1, i2, len(ee) - 1))

	# Sentinel values force at least one pass through the refinement loop below.
	idosmin, idosmax = 1.0, -1.0
	delta_emin, delta_emax = -1.5, 1.5
	eres = (ee[-1] - ee[0]) / (len(ee) - 1)
	# Define a smaller energy range around the idos = 0 value, with 100x
	# the original energy resolution.
	# Extend lower and upper bound till the range contains a negative
	# value at the lower boundary, positive value at the upper boundary.
	# The loop is bounded by a maximum window of 20 energy-resolution steps.
	while (not (idosmin <= 0.0 and idosmax >= 0.0)) and (delta_emax - delta_emin <= 20.0):
		ee1 = get_erange(ee[i1 - 1] + delta_emin * eres, ee[i2] + delta_emax * eres, 0.01 * eres)
		idos1 = int_dos_by_band(ei_data, ee1, radial = radial, broadening = broadening, electrons=True, holes=True)
		if idos1 is None:
			# Fall back to the (less accurate) counting method
			idos1 = int_dos(ei_data, ee1, radial = radial)
		idosmin = np.amin(idos1)
		idosmax = np.amax(idos1)
		if idosmax < 0.0:
			delta_emax += 1
		if idosmin > 0.0:
			delta_emin -= 1

	ef0 = None
	if np.count_nonzero(idos1 == 0.0) >= 1:
		# If there are zero values, define the Fermi energy to be the
		# mean of all of them.
		ef0 = np.mean(ee1[idos1 == 0.0])
	elif idosmax < 0. or idosmin > 0.:
		sys.stderr.write("Warning (get_fermi_energy): Fermi energy out of range (high resolution).\n")
	else:
		# If there are no zero values, do a linear interpolation between
		# the negative and positive IDOS values closest to zero.
		for i in range(0, len(ee1) - 1):
			if idos1[i] <= 0 and idos1[i + 1] > 0:
				ef0 = linzero(ee1[i], ee1[i + 1], idos1[i], idos1[i + 1])
				break
	if ef0 is None:
		sys.stderr.write("Warning (get_fermi_energy): Unable to determine Fermi energy. Raising the temperature may solve this problem.\n")
	return ef0
+
+### GENERIC DENSITY OF STATES FUNCTIONS ###
+
def integrated_dos(
		ei_data, erange, params, calculate_ef = True, radial = True, broadening = None):
	"""Calculate integrated density of states from DiagData instance.

	Arguments:
	ei_data        DiagData instance. Eigenvalue (diagonalization) data.
	erange         Tuple of 3 floats or array. Energy range (min, max, res) or
	               energy values in meV.
	params         PhysParams instance. The physical parameters.
	calculate_ef   True or False. Whether to calculate Fermi energy.
	radial         True or False. If True, interpret 1D momentum values as radii
	               in polar coordinates. If False, consider data to be from a
	               one-dimensional (= cartesian) geometry.
	broadening     BroadeningFunction, MultiBroadening, or None. The broadening
	               parameter.

	Returns:
	densitydata          DensityData instance, that contains the result.

	If the calculation is (partially) unsuccessful, the return value may be
	None, or some of the values in DensityData may not be set (equal to None).
	"""
	if ei_data is None:
		sys.stderr.write("Warning (integrated_dos): No data.\n")
		return None

	if len(ei_data) < 1:
		sys.stderr.write("Warning (integrated_dos): No data, for example due to too few momentum values.\n")
		return None

	## Energy values
	ee = get_erange(erange)

	## Parameters
	if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
		broadening.eres_test(ee)  # warn if energy resolution is coarser than broadening
	elif broadening is not None:
		# Bug fix: message now names both accepted broadening types
		raise TypeError("Argument broadening must be a BroadeningFunction or MultiBroadening instance or None")

	## Calculate IDOS; prefer the by-band method, fall back to counting
	idos = int_dos_by_band(
		ei_data, ee, return_dict = False, radial = radial,
		broadening = broadening, electrons=True, holes=True)
	if idos is None:
		sys.stderr.write("Warning (integrated_dos): Unable to calculate DOS by band. Now trying fallback to (less accurate) counting method.\n")
		idos = int_dos(ei_data, ee, radial = radial)
	if idos is None:
		sys.stderr.write("ERROR (integrated_dos): Failed to calculate DOS.\n")
		return None

	# Determine validity range: extrema of first valence and conduction subbands
	val_rng = dos_validity_range(ei_data)

	# Calculate Fermi energy at zero density, if requested (default = recommended = True)
	ef0 = None
	if calculate_ef:
		ef0 = get_fermi_energy(
			ei_data, idos, ee, radial = radial,
			broadening = broadening)
		if ef0 is not None:
			print("Fermi energy at CNP (n=0): %g meV" % ef0)

	# Store data
	densitydata = DensityData(
		ee, None, densdata=idos, kdim=params.kdim, validity_range=val_rng,
		aligned_with_e0=ei_data.aligned_with_e0
	)
	densitydata.set_special_energies(ef0 = ef0)

	if 'verbose' in sys.argv:
		densitydata.print_verbose()

	return densitydata
+
+
def integrated_dos_by_band(
	ei_data, erange, params, radial = True, broadening = None):
	"""Calculate integrated density of states by band from DiagData instance.

	Arguments:
	ei_data        DiagData instance. Eigenvalue (diagonalization) data.
	erange         Tuple of 3 floats or array. Energy range (min, max, res) or
	               energy values in meV.
	params         PhysParams instance. The physical parameters.
	radial         True or False. If True, interpret 1D momentum values as radii
	               in polar coordinates. If False, consider data to be from a
	               one-dimensional (= cartesian) geometry.
	broadening     BroadeningFunction, MultiBroadening, or None. The broadening
	               parameter.

	Returns:
	densitydata_byband   DensityDataByBand instance; the underlying dict keys
	                     are the band labels.

	If the calculation is (partially) unsuccessful, the return value may be
	None, or some of the values in DensityData may not be set (equal to None).
	"""
	if ei_data is None:
		sys.stderr.write("Warning (integrated_dos_by_band): No data.\n")
		return None

	if len(ei_data) < 1:
		sys.stderr.write("Warning (integrated_dos_by_band): No data, for example due to too few momentum values.\n")
		return None

	## Energy values
	ee = get_erange(erange)

	## Parameters
	if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
		broadening.eres_test(ee)  # warn if energy resolution is coarser than broadening
	elif broadening is not None:
		# Bug fix: message now names both accepted broadening types
		raise TypeError("Argument broadening must be a BroadeningFunction or MultiBroadening instance or None")

	idos = int_dos_by_band(
		ei_data, ee, return_dict = True, radial = radial,
		broadening = broadening, electrons=True, holes=True)

	if idos is None:
		sys.stderr.write("ERROR (integrated_dos_by_band): Failed to calculate DOS by band.\n")
		return None

	return DensityDataByBand(
		ee, None, densdata=idos, kdim=params.kdim, aligned_with_e0=ei_data.aligned_with_e0
	)
+
+
def local_integrated_dos(ei_data, erange, params, min_res=None, broadening=None):
	"""Calculate local density of states from DiagData instance.

	Arguments:
	ei_data        DiagData instance. Eigenvalue (diagonalization) data.
	erange         Tuple of 3 floats or array. Energy range (min, max, res) or
	               energy values in meV.
	params         PhysParams instance. The physical parameters.
	min_res        Integer, float or None. If not None, the minimal resolution
	               in the 'x' coordinate. Use interpolation if needed.
	broadening     BroadeningFunction, MultiBroadening, or None. The broadening
	               parameter.

	Returns:
	densitydata    DensityData instance with the result, or None on failure.
	"""
	if ei_data is None:
		sys.stderr.write("Warning (local_integrated_dos): No data.\n")
		return None

	if len(ei_data) < 1:
		sys.stderr.write("Warning (local_integrated_dos): No data, for example due to too few momentum values.\n")
		return None

	# Interpolate data points to reach the requested minimal resolution
	if min_res is not None:
		ei_data_ip = data_interpolate_for_ldos(ei_data, min_res)
	else:
		ei_data_ip = ei_data

	## Energy values
	ee = get_erange(erange)

	## Parameters
	if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
		broadening.eres_test(ee)  # warn if energy resolution is coarser than broadening
	elif broadening is not None:
		# Bug fix: message now names both accepted broadening types
		raise TypeError("Argument broadening must be a BroadeningFunction or MultiBroadening instance or None")

	# Prefer the momentum grid if available, otherwise extract momenta
	if isinstance(ei_data_ip.grid, VectorGrid) and ei_data_ip.gridvar == 'k':
		kval = ei_data_ip.grid
	else:
		kval = ei_data_ip.get_momenta()
	lidos = loc_int_dos(ei_data_ip, ee, broadening = broadening)
	if lidos is None:
		sys.stderr.write("ERROR (local_integrated_dos): Failed to calculate local integrated DOS.\n")
		return None

	densitydata = DensityData(
		ee, kval, densdata = lidos, kdim = params.kdim,
		aligned_with_e0=ei_data.aligned_with_e0
	)

	if 'verbose' in sys.argv:
		densitydata.print_verbose()

	return densitydata
+
+
def integrated_dos_ll(ei_data, erange, params, min_res=None, broadening=None):
	"""Calculate integrated density of states from DiagData instance for Landau-level mode.

	Arguments:
	ei_data        DiagData instance. Eigenvalue (diagonalization) data.
	erange         Tuple of 3 floats or array. Energy range (min, max, res) or
	               energy values in meV.
	params         PhysParams instance. The physical parameters.
	min_res        Integer, float or None. If not None, the minimal resolution
	               in the 'x' coordinate. Use interpolation if needed.
	broadening     BroadeningFunction, MultiBroadening, or None. The broadening
	               parameter.

	Returns:
	densitydata    DensityData instance with the IDOS as function of magnetic
	               field and energy, or None on failure.

	Note:
	Unlike the other functions in this module, a missing ei_data terminates the
	program (sys.exit) instead of returning None.
	"""
	if 'verbose' in sys.argv:
		print('integrated_dos_ll: broadening', broadening)
	if ei_data is None:
		sys.stderr.write("ERROR (integrated_dos_ll): No data.\n")
		# Bug fix: use sys.exit() instead of the 'site' builtin exit(), which
		# is not guaranteed to be available outside interactive sessions.
		sys.exit(1)

	if len(ei_data) < 1:
		sys.stderr.write("Warning (integrated_dos_ll): No data, for example due to too few momentum values.\n")
		return None

	## Energy values
	ee = get_erange(erange)

	# Interpolate data points (and broadening widths accordingly) to reach the
	# requested minimal resolution
	if min_res is not None:
		ei_data_ip = data_interpolate_for_ldos(ei_data, min_res)
		if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
			broadening = broadening.interpolate_width(len(ei_data_ip))
	else:
		ei_data_ip = ei_data

	## Magnetic field values
	bval = ei_data_ip.get_paramval()
	if isinstance(bval, VectorGrid):
		bzval = bval.get_values('bz')
	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], Vector):
		bzval = [b.z() for b in bval]
	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], (float, np.floating, int, np.integer)):
		bzval = bval
	else:
		raise TypeError("Invalid input for bval")

	## Broadening
	if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
		broadening.eres_test(ee)  # warn if energy resolution is coarser than broadening
	elif broadening is not None:
		# Bug fix: message now names both accepted broadening types
		raise TypeError("Argument broadening must be a BroadeningFunction or MultiBroadening instance or None")

	## Multiply by LL degeneracy (inverse area per LL)
	ll_inv_area = np.abs(bzval) * eoverhbar / (2. * np.pi)
	lidos1 = loc_int_dos(ei_data_ip, ee, broadening = broadening)
	if lidos1 is None:
		# Bug fix: error message previously named 'local_integrated_dos_ll'
		sys.stderr.write("ERROR (integrated_dos_ll): Failed to calculate local integrated DOS.\n")
		return None
	lidos = lidos1 * ll_inv_area[:, np.newaxis]

	densitydata = DensityData(
		ee, bval, densdata = lidos, kdim = params.kdim, ll = True,
		aligned_with_e0=ei_data.aligned_with_e0
	)

	if 'verbose' in sys.argv:
		densitydata.print_verbose()

	return densitydata
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/densitydata.py b/kdotpy-v1.0.0/src/kdotpy/density/densitydata.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3ab45b8652b196311b260a0d96e127d05c4b587
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/densitydata.py
@@ -0,0 +1,822 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from ..config import get_config
+from ..physconst import eoverhbar
+from ..momentum import VectorGrid
+from ..types import DiagData
+from ..phystext import format_unit
+
+from .densityscale import DensityScale
+
+
+### TOOL / HELPER FUNCTIONS ###
energies_text = {
	'e0': 'E_CNP (k=0)',
	'ef0b': 'E_F(n=0) (bands)',
	'ef0': 'E_F(n=0)',
	'ef': 'E_F(n)',
	'mu0': 'mu(n=0)',
	'mu': 'mu(n)',
}

def print_energies_dict(energies, at_density = None, density_offset = None, kdim = 2, stream = sys.stdout):
	"""Write a table of special energy values (CNP, Fermi energies, ...) to a stream.

	Arguments:
	energies        Dict. Keys are energy labels; known labels (see
	                energies_text) are printed first, any remaining labels
	                follow alphabetically.
	at_density      Float or None. If set, also print this density value.
	density_offset  Float or None. If set, also print this density offset.
	kdim            Integer. Geometrical dimension; determines the density unit
	                exponent.
	stream          Writable stream. Defaults to sys.stdout.

	No return value.
	"""
	if not isinstance(energies, dict):
		raise TypeError("Argument 'energies' must be a dict")
	if not any(key in energies for key in energies_text):
		sys.stderr.write("Warning (print_energies): Argument energies does not contain any of the expected special energies.\n")
		return
	stream.write("\nSpecial energy values:\n")
	# Known special energies first, in the canonical order of energies_text
	for key, label in energies_text.items():
		if key not in energies:
			continue
		padding = " " * (22 - len(key) - len(label))
		stream.write(f"{label}{padding}[{key}]: {energies[key]:8.3f} meV\n")
	# Remaining (non-canonical) labels, alphabetically
	for key in sorted(energies):
		if key in energies_text:
			continue
		stream.write("E*%s [%s]: %8.3f meV \n" % (" " * max(14 - len(key), 0), key, energies[key]))
	if at_density is not None:
		stream.write('At density           [n]: %8g nm^%i\n' % (at_density, -kdim))
	if density_offset is not None:
		stream.write('Density offset      [Δn]: %8g nm^%i\n' % (density_offset, -kdim))
	stream.write('\n')
	return
+
def dos_validity_range(data):
	"""Calculate validity range of DOS

	Use the following method: Find the outward gradient of all the edges of
	the momentum range. Find the minimum energy of all points 'going upwards'.
	Since energies above this value are not considered, this is an upper limit
	of the validity range. Likewise, the lower limits are found as the maximum
	energy of all points 'going downwards'. Iterate over all bands and all outer
	edges of the momentum range.

	Bands containing NaN energies and axes corresponding to angular coordinates
	(phi, theta) are skipped, as are edges where the coordinate value is zero.

	Arguments:
	data        DiagData instance (dispersion data)

	Returns:
	[llim, ulim]   List of two floats. The energies between which the
	               (integrated) DOS can be considered valid.
	"""
	bindex = data.get_all_bindex()
	all_llim = []
	all_ulim = []
	# Grid dimensionality determines which plot-coordinate indexing mode to use
	ndim = len(data.shape)
	if ndim not in [1, 2, 3]:
		raise ValueError("Invalid number of dimensions")
	data_indexing = "index" if ndim == 1 else "index2d" if ndim == 2 else "index3d" if ndim == 3 else ""
	comp = data.grid.get_components()
	verbose = 'verbose' in sys.argv
	for b in bindex:
		# ei is the energy array of band b over the full grid
		_, ei = data.get_plot_coord(b, data_indexing)
		if np.count_nonzero(np.isnan(ei)) > 0:
			continue  # Skip bands that contain NaN values
		for ax in range(0, ndim):  # Iterate over grid axes
			co = comp[ax]
			if co.endswith('phi') or co.endswith('theta'):
				continue  # Do not consider axes corresponding to angular coordinates
			for pos in [0, -1]:  # Iterate over left and right edge
				# Only consider points where coordinate value is nonzero
				if np.abs(data.grid.values[ax][pos]) < 1e-9:
					continue
				# index0 selects the edge slice: fixed position along axis ax,
				# full slices along all other axes
				index0 = tuple(pos if a == ax else slice(None, None, None) for a in range(0, ndim))
				if verbose:
					index0str = str(index0).replace('slice(None, None, None)', ':')
					print("band=%i axis=%s, pos=%s, index0=%s" % (b, comp[ax], 'L' if pos == 0 else 'R', index0str))
				## Find positions at the edge of the momentum range where
				## dispersion goes up / down
				grad = np.gradient(ei, axis = ax)
				lr = -1 if pos == 0 else 1  # factor for 'left' or 'right' edge
				upsel = (lr * grad[index0]) > 0
				dnsel = (lr * grad[index0]) < 0

				## Find minimum of energies at 'upwards' points; this is an upper limit
				if np.count_nonzero(upsel) > 0:
					all_ulim.append(np.amin(ei[index0][upsel]))
				## Find maximum of energies at 'downwards' points; this is a lower limit
				if np.count_nonzero(dnsel) > 0:
					all_llim.append(np.amax(ei[index0][dnsel]))
	if verbose:
		print('Upper limits', ", ".join(["%7.3f" % x for x in all_ulim]))
		print('Lower limits', ", ".join(["%7.3f" % x for x in all_llim]))

	# Tightest bounds over all bands and edges; None if no candidate was found
	ulim = None if len(all_ulim) == 0 else min(all_ulim)
	llim = None if len(all_llim) == 0 else max(all_llim)
	if ulim is None:
		sys.stderr.write("Warning (dos_validity_range): Unable to determine upper limit.\n")
	if llim is None:
		sys.stderr.write("Warning (dos_validity_range): Unable to determine lower limit.\n")
	if ulim is not None and llim is not None and ulim < llim:
		sys.stderr.write("Warning (dos_validity_range): Upper limit smaller than lower limit. Density of states result may not be valid anywhere. Consider increasing momentum range.\n")

	return [llim, ulim]
+
def data_interpolate_for_ldos(ei_data, min_res, obs = False):
	"""Interpolate data for local density of states

	Arguments:
	ei_data     DiagData instance. Dispersion or field-dependence data.
	min_res     Positive integer. The minimal desired resolution for the 'x'
	            coordinate.
	obs         True or False. Whether to interpolate values of observables too.

	Returns:
	ei_data_ip  DiagData instance or None. The interpolated data; the input
	            instance unchanged if it already has sufficient resolution or
	            is too short to interpolate; None if ei_data is None.
	"""
	if ei_data is None:
		# Bug fix: previously this only warned and then fell through to
		# len(ei_data), which raised a TypeError on None.
		sys.stderr.write("Warning (data_interpolate_for_ldos): Interpolation of data requires ei_data to be a DiagData instance.\n")
		return None
	if not isinstance(ei_data, DiagData):
		raise TypeError("Argument ei_data must be a DiagData instance.")
	if not isinstance(min_res, (int, np.integer)):
		raise TypeError("Argument min_res must be a positive integer.")
	if min_res < 1:
		raise ValueError("Argument min_res must be a positive integer.")

	if len(ei_data) < 2:
		# Fewer than two data points: nothing to interpolate between. This also
		# avoids a division by zero (len(ei_data) - 1 == 0) below.
		return ei_data
	if len(ei_data) < min_res:
		# Number of subdivisions per interval needed to reach >= min_res points
		subdiv = int(np.ceil(min_res / (len(ei_data) - 1)))
		try:
			ei_data_ip = ei_data.interpolate(subdiv, obs = obs)
		except Exception:
			sys.stderr.write("ERROR (data_interpolate_for_ldos): Interpolation of data for local DOS has failed.\n")
			raise
	else:
		ei_data_ip = ei_data
	return ei_data_ip
+
+
+### IDOS VS ENERGY SOLVERS ###
+
def idos_at_energy(
	ee, ee_data, idos_data, validity_range = None, suppress_warning = False):
	"""Get integrated density of states (IDOS) at a given energy.

	The target energy ee need not coincide with a point of the energy array;
	the IDOS is obtained by linear interpolation over its energy dependence.
	This is the "inverse" of energy_at_idos().

	Arguments:
	ee                Float. Target energy for which to return the IDOS value.
	ee_data           Numpy array. Energy array for the IDOS values.
	idos_data         Numpy array. IDOS values.
	validity_range    List of 2 floats or None. If set, warn when ee lies
	                  outside this range.
	suppress_warning  True or False. If True, do not issue any warnings.

	Returns:
	idos              Float, or None if ee is None or out of range. The
	                  interpolated IDOS value at energy ee.
	"""
	in_range = ee is not None and ee_data.min() <= ee <= ee_data.max()
	if not in_range:
		if not suppress_warning:
			sys.stderr.write("Warning (idos_at_energy): Requested energy value is out of range for this dispersion.\n")
		return None
	if validity_range is not None and not suppress_warning:
		llim, ulim = validity_range[0], validity_range[1]
		if (llim is not None and ee < llim) or (ulim is not None and ee > ulim):
			sys.stderr.write("Warning (idos_at_energy): Requested energy value is in range for this dispersion, but due to insufficient data, the result may be incorrect.\n")
	return np.interp(ee, ee_data, idos_data)
+
def energy_at_idos(
	idos, ee_data, idos_data, validity_range = None, suppress_warning = False):
	"""Get energy at a given value of the integrated density of states (IDOS).

	Given n(E), the IDOS n as function of energy E, solve E0 from the equation
	n(E0) = n0 for a given IDOS value n0. This is the "inverse" of
	idos_at_energy().

	Arguments:
	idos              Float. Target IDOS n0 for which to solve n(E0) = n0.
	ee_data           Numpy array. Energy array for the IDOS values.
	idos_data         Numpy array. IDOS values.
	validity_range    List of 2 floats or None. If set, warn when the resulting
	                  energy lies outside this range.
	suppress_warning  True or False. If True, do not issue any warnings.

	Returns:
	ee                Float, or None if idos is out of range. Energy value at
	                  which the IDOS reaches the value idos.
	"""
	if idos_data.shape[-1] != len(ee_data):
		raise ValueError("Sizes of input arrays do not match")

	n_data = len(idos_data)
	n_below = np.count_nonzero(idos_data < idos)
	n_above = np.count_nonzero(idos_data > idos)
	if n_below == 0 or n_above == 0:
		# Target IDOS value lies outside the data range
		if not suppress_warning:
			sys.stderr.write("Warning (energy_at_idos): Requested IDOS value (density) is out of range for this dispersion.\n")
		return None

	if n_below + n_above == n_data:
		# No data point equals idos exactly: interpolate linearly between the
		# two bracketing points (midpoint if they share the same IDOS value)
		e1, e2 = ee_data[n_below - 1], ee_data[n_data - n_above]
		i1, i2 = idos_data[n_below - 1], idos_data[n_data - n_above]
		if i1 == i2:
			ee = 0.5 * (e1 + e2)
		else:
			ee = e1 + (e2 - e1) * (idos - i1) / (i2 - i1)
	else:
		# One or more data points equal idos exactly: take the midpoint of the
		# energies of the first and last such point
		ee = 0.5 * (ee_data[n_below] + ee_data[n_data - n_above - 1])

	if validity_range is not None and not suppress_warning:
		llim, ulim = validity_range[0], validity_range[1]
		if (llim is not None and ee < llim) or (ulim is not None and ee > ulim):
			sys.stderr.write("Warning (energy_at_idos): Requested IDOS value (density) is in range for this dispersion, but due to insufficient data, the result may be incorrect.\n")

	return ee
+
+### CLASS DEFINITIONS ###
+
class DensityData:
	"""Container class for density of states data.

	The density data is stored as the integrated density of states (IDOS); the
	DOS is obtained on demand by numerical differentiation (see get_dos()).

	Attributes:
	ee                Numpy array of one dimension with increasing values.
	                  Energies in meV.
	xval              None, list/array, VectorGrid. Momentum or B values (for
	                  local DOS), band indices (for DOS by band).
	dx                None or array. Volume elements.
	densdata          Data (IDOS), always in the same unit (in nm^-d)
	kdim              1, 2, or 3. Geometrical dimension d.
	ll                True or False. Whether LL density or momentum density.
	int_obs           True or False. Whether the input is an integrated
	                  observable. If True, it will thus not be multiplied by
	                  1 / (2 pi)^d. (__init__ argument not stored as attribute.)
	validity_range    2-tuple. Range of energies where the data can be trusted.
	scale             DensityScale instance or None. Determines quantity and
	                  units for output.
	special_energies  Dict instance or None. Special energy value parsed by
	                  energies_dict(). CNP, E_Fermi, etc.
	aligned_with_e0   True or False. Whether this instance has been generated
	                  from data where the band indices were aligned with the
	                  zero energy E0. The value is inherited from the DiagData
	                  instance from which the density data is obtained. If not
	                  set, use default value False.
	strategy_no_e0    String: 'strict', 'dos', or 'ignore'. Configuration value
	                  (dos_strategy_no_e0) that controls which quantities may
	                  still be returned when aligned_with_e0 is False.
	"""
	def __init__(
		self, ee, xval, dx = None, densdata = None, kdim = None, ll = False,
		int_obs = False, validity_range = None, scale = None,
		special_energies = None, aligned_with_e0 = False):
		if not isinstance(ee, np.ndarray):
			raise TypeError("Argument ee must be a numpy array.")
		if not np.amin(np.diff(ee)) > 0:
			raise ValueError("Argument ee must be a numpy array of increasing numbers.")
		self.ee = ee
		# Normalize xval to None, a numpy array, or a VectorGrid
		if xval is None:
			self.xval = None
		elif isinstance(xval, list):
			self.xval = np.asarray(xval)
		elif isinstance(xval, (np.ndarray, VectorGrid)):
			self.xval = xval
		else:
			raise TypeError("Argument xval must be a list/array of numbers or Vector instances, or a VectorGrid, or None")
		if kdim in [1, 2, 3]:
			self.kdim = kdim
		else:
			raise ValueError("Argument kdim must be set to 1, 2, or 3.")
		self.dx = dx
		if isinstance(densdata, np.ndarray):
			if ll or int_obs:
				self.densdata = densdata
			else:
				# If the input is a volume in k space (like the output from
				# functions in densitybase.py) in dispersion mode, convert
				# to 'real' DOS.
				self.densdata = densdata / (2 * np.pi)**self.kdim
		else:
			raise TypeError("Argument densdata must be a numpy array.")
		# (assume self.xval is an array, a VectorGrid instance, or None)
		xshape = () if self.xval is None else self.xval.shape
		self.shape = (*xshape, *self.ee.shape)
		if self.densdata.shape != self.shape:
			raise ValueError("Shapes of xval, ee do not match densdata: %s %s versus %s" % (xshape, self.ee.shape, self.densdata.shape))
		self.ll = ll
		self.validity_range = validity_range
		# The scale may be given as a ready-made DensityScale or as a quantity
		# string, from which a DensityScale is constructed
		if scale is None:
			self.scale = None
		elif isinstance(scale, DensityScale):
			self.scale = scale
		elif isinstance(scale, str):
			self.scale = DensityScale(self.densdata, scale, unit = 'nm', kdim = self.kdim, ll = self.ll)
		else:
			raise TypeError("Argument scale must be a DensityScale instance, a string, or None")
		if special_energies is None:
			special_energies = {}
		elif not isinstance(special_energies, dict):
			raise TypeError("Argument special_energies must be a dict or None")
		self.special_energies = {key: val for key, val in special_energies.items() if val is not None}  # Exclude None values
		self.aligned_with_e0 = aligned_with_e0
		self.strategy_no_e0 = get_config('dos_strategy_no_e0', choices=['strict', 'dos', 'ignore'])

	def copy(self):
		"""Return a shallow copy of the present instance"""
		# NOTE(review): dx is not propagated to the copy (dx = None) — confirm
		# this is intentional; methods relying on dx (integrate_x) on a copy
		# will fail.
		return DensityData(
			self.ee, self.xval, dx = None, densdata = self.densdata,
			kdim = self.kdim, ll = self.ll, validity_range = self.validity_range,
			scale = self.scale, special_energies = self.special_energies,
			aligned_with_e0 = self.aligned_with_e0)

	def integrate_x(self, inplace = False):
		"""Integrate over the x (k or B) axis

		Argument:
		inplace    True or False. If True, 'collapse' the x axis in the present
		           instance. If False, return a new instance.

		Returns:
		densitydata   The present or a new DensityData instance.
		"""
		# Weighted sum over x with the volume elements dx; requires self.dx
		intx_dens = np.dot(self.dx, self.densdata)
		obj = self if inplace else self.copy()
		obj.densdata = intx_dens
		obj.xval = None
		obj.dx = None
		return obj

	def get_dos(self, scaled = False, derivative = 'gradient'):
		"""Get density of states (DOS)

		Argument:
		scaled       True or False. If True, return scaled values if self.scale
		             is set.
		derivative   'gradient' or 'diff'. Use the corresponding numpy functions
		             for calculating the discrete derivative. Note that using
		             'diff', the resulting array will be smaller by 1 entry.

		Returns:
		dos   Numpy array, or None if the data is not aligned with E0 and the
		      configured strategy is 'strict'.
		"""
		if not self.aligned_with_e0 and self.strategy_no_e0 == 'strict':
			return None

		# In the following, broadcast of self.ee should happen automatically
		if derivative == 'gradient':
			dosval = np.gradient(self.densdata, axis = -1) / np.gradient(self.ee)
		elif derivative == 'diff':
			dosval = np.diff(self.densdata, axis = -1) / np.diff(self.ee)
		else:
			raise ValueError("Argument derivative must be 'gradient' or 'diff'")
		if scaled and self.scale is not None:
			return self.scale.scaledvalues(dosval)
		else:
			return dosval

	def get_idos(self, scaled = False):
		"""Get the integrated density of states (IDOS).

		Returns None if the data is not aligned with E0 and the configured
		strategy is 'strict' or 'dos'.
		"""
		if not self.aligned_with_e0 and self.strategy_no_e0 in ['strict', 'dos']:
			return None

		if scaled and self.scale is not None:
			return self.scale.scaledvalues(self.densdata)
		else:
			return self.densdata

	def xyz_dos(self, **kwds):
		"""Convenience function for parsing data to plot/table functions.
		Use plot_function(*densitydata.xyz_dos(), ...)
		"""
		return self.xval, self.ee, self.get_dos(**kwds)

	def xyz_idos(self, **kwds):
		"""Convenience function for parsing data to plot/table functions.
		Use plot_function(*densitydata.xyz_idos(), ...)
		"""
		return self.xval, self.ee, self.get_idos(**kwds)

	def get_numeric_dos_ll(self, method = 'derivative', component = 'b'):
		"""Calculate numerical DOS from the magnetic-field derivative of the local integrated DOS.
		See there for more information.

		Arguments:
		method      'derivative' or 'division'. If 'derivative', return the
		            magnetic field derivative of the local integrated DOS. If
		            'division', return local integrated DOS divided by magnetic
		            field.
		component   String. Which vector component to take from the grid values.

		Returns:
		dlidos      Numpy array of two dimensions with the result.
		"""
		if not self.ll:
			sys.stderr.write("Warning (DensityData.numeric_dos_ll): This function is appropriate only in LL mode.\n")
		b = self.xval.get_values(component) if isinstance(self.xval, VectorGrid) else np.asarray(self.xval)
		if method == 'derivative':
			if self.xval is None or len(self.xval) < 3:
				sys.stderr.write("ERROR (DensityData.numeric_dos_ll): Data array has too few elements.\n")
				return None
			# Central differences in the interior, one-sided at the two edges.
			# NOTE(review): the edge differences divide by the central-difference
			# spacings db[0] and db[-1], not by the one-sided spacings — confirm
			# this is intended.
			db = 0.5 * (b[2:] - b[:-2])
			dlidos_first = (self.densdata[1:2, :] - self.densdata[0:1, :]) / db[0]
			dlidos_bulk = (self.densdata[2:, :] - self.densdata[:-2, :]) / 2 / db[:, np.newaxis]
			dlidos_last = (self.densdata[-1:, :] - self.densdata[-2:-1, :]) / db[-1]
			dlidos = np.concatenate((dlidos_first, dlidos_bulk, dlidos_last))
			return dlidos * 2. * np.pi / eoverhbar
		elif method == 'divide' or method == 'division':
			with np.errstate(invalid = 'ignore'):  # suppress 'invalid value' warnings
				dlidos = self.densdata / b[:, np.newaxis]
			return dlidos * 2. * np.pi / eoverhbar
		else:
			raise ValueError("Argument method must be 'derivative' or 'division' (alias 'divide')")

	def get_validity_range(self):
		"""Get lower and upper bound of the validity range (possibly None)"""
		if self.validity_range is None:
			return None, None
		else:
			return tuple(self.validity_range)

	def print_validity_range(self, fmt = '%.3f meV'):
		"""Format the validity range as a string.

		Note: Despite the name, this method does not print anything; it returns
		the formatted string.

		Argument:
		fmt    Format string for energy values (of type float).

		Returns:
		String of the form "[llim, ulim]", with "??" substituted for limits
		that are undetermined; "??" if the validity range is not set at all.
		"""
		if self.validity_range is None:
			return "??"
		llim_str = '??' if self.validity_range[0] is None else (fmt % self.validity_range[0])
		ulim_str = '??' if self.validity_range[1] is None else (fmt % self.validity_range[1])
		return "[%s, %s]" % (llim_str, ulim_str)

	def set_scale(self, scale, unit = None, *, limits = None, scaled_limits = None, autoscale = True):
		"""Scale density automatically according to the given limits or to the IDOS values"""
		if isinstance(scale, DensityScale) and unit is None:
			self.scale = scale
		elif isinstance(scale, str) and isinstance(unit, str):
			qty = scale  # alias; this argument acts as qty if unit is set
			if scaled_limits is not None:
				if limits is not None:
					raise ValueError("Arguments limits and scaled_limits may not be given simultaneously")
				self.scale = DensityScale(scaled_limits, qty, unit, kdim = self.kdim, ll = self.ll, scaledinput = True)
			elif limits is not None:
				# NOTE(review): this branch also passes scaledinput = True,
				# identical to the scaled_limits branch — confirm that limits
				# (unscaled) should not use scaledinput = False.
				self.scale = DensityScale(limits, qty, unit, kdim = self.kdim, ll = self.ll, scaledinput = True)
			else:
				self.scale = DensityScale(self.densdata, qty, unit, kdim = self.kdim, ll = self.ll)
		elif scale is None:
			self.scale = None  # reset
		else:
			raise TypeError("Positional arguments may be a DensityScale instance, two strings, or None.")
		return self

	def get_scale(self):
		"""Return the current DensityScale instance (or None)"""
		return self.scale

	def scaledvalues(self, values):
		"""Wrapper around DensityScale.scaledvalues()"""
		return values if self.scale is None else self.scale.scaledvalues(values)

	def unitstr(self, style = 'raw', integrated = True, scaled = False, negexp = False):
		"""Wrapper around DensityScale.unitstr()"""
		# Without a scale, fall back to the raw units nm^-d (IDOS) or
		# nm^-d meV^-1 (DOS)
		if (not scaled) or self.scale is None:
			if integrated:
				return format_unit(('nm', -self.kdim), style = style, negexp = negexp)
			else:
				return format_unit(('nm', -self.kdim), ('meV', -1), style = style, negexp = negexp)
		else:
			return self.scale.unitstr(style = style, integrated = integrated, negexp = negexp)

	def qstr(self, style = 'raw', integrated = True, scaled = False):
		"""Wrapper around DensityScale.qstr()"""
		if (not scaled) or self.scale is None:
			return "IDOS" if integrated else "DOS"
		else:
			return self.scale.qstr(style = style, integrated = integrated)

	def set_special_energies(self, **kwds):
		"""Set special energies; input them as keyword arguments"""
		for key, val in kwds.items():
			if val is None:  # ignore None values
				continue
			# Unrecognized labels are stored anyway, but with a warning
			if key not in ['e0', 'ecnp', 'ef', 'ef0', 'mu', 'mu0']:
				sys.stderr.write("Warning (DensityData.set_special_energies): Label '%s' not recognized as special energy.\n" % key)
			self.special_energies.update({key: val})
		return self

	def get_special_energies(self):
		"""Get special energies"""
		return self.special_energies

	def print_special_energies(self, at_density = None, density_offset = None, stream = None):
		"""Print special energies. See print_energies_dict()."""
		if stream is None:
			stream = sys.stdout
		print_energies_dict(self.special_energies, at_density = at_density, density_offset = density_offset, kdim = self.kdim, stream = stream)

	def idos_at_energy(self, ee, save_as = None, suppress_warning = False):
		"""Get integrated density of states (IDOS) at a given energy.
		Wrapper for idos_at_energy(); see there.

		If save_as is set and ee is numeric, also store ee as a special energy
		under that label.
		"""
		if not self.aligned_with_e0 and self.strategy_no_e0 in ['strict', 'dos']:
			return None
		if save_as is not None and isinstance(ee, (int, float, np.integer, np.floating)):
			self.set_special_energies(**{save_as: ee})
		return idos_at_energy(ee, self.ee, self.densdata, validity_range = self.validity_range, suppress_warning = suppress_warning)

	def energy_at_idos(self, idos, save_as = None, suppress_warning = False):
		"""Get energy at a given value of the integrated density of states (IDOS) / carrier density.
		Wrapper for energy_at_idos(); see there.

		If save_as is set and the resulting energy is numeric, also store it as
		a special energy under that label.
		"""
		if not self.aligned_with_e0 and self.strategy_no_e0 in ['strict', 'dos']:
			return None
		ee = energy_at_idos(idos, self.ee, self.densdata, validity_range = self.validity_range, suppress_warning = suppress_warning)
		if save_as is not None and isinstance(ee, (int, float, np.integer, np.floating)):
			self.set_special_energies(**{save_as: ee})
		return ee

	def energy_at_dos_ll(self, idos, do_ldos = False, subdiv = 5):
		"""Get energy at a given value of the integrated density of states (IDOS), for Landau-level mode

		Given n(B, E), the IDOS n as function of magnetic field B and energy E,
		solve energy E0(B) from the	equation n(E0(B), B) = n0, where n0 is a
		given IDOS value. Also calculate DOS at these energies.

		Arguments:
		idos           Float or integer or a list/array of these. If numerical,
		               the target IDOS n0 for which solve for E0(B). If a
		               list/array, iterate over the values.
		do_ldos        True or False. If True, also evaluate the density of
		               states at the energies E0(B).
		subdiv         Integer. If larger than 1, interpolate the data of
		               ei_data with subdiv - 1 values between the existing data
		               points.

		Returns:
		idos           Numpy array, containing the input values.
		ee_results     Numpy array of dimension 2. The energy values E0(B), for
		               each value in IDOS.
		ldos_results   Numpy array of dimension 2, or None if do_ldos is False.
		               Density of states at E0(B), for each value in IDOS. It
		               has the same size as ee_results.
		"""
		if not self.aligned_with_e0 and self.strategy_no_e0 in ['strict', 'dos']:
			return None, None, None
		if isinstance(idos, (float, np.floating, int, np.integer)):
			idos = np.array([idos])
		elif isinstance(idos, list) or isinstance(idos, np.ndarray):
			idos = np.asarray(idos)
		else:
			sys.stderr.write("ERROR (DensityData.energy_at_dos_ll): Input value idos must be a number (float) or a list/array.\n")
			exit(1)

		# Subdivide (interpolate) in the magnetic field direction
		if subdiv > 1:
			# Linear interpolation: subdiv values per original interval, then
			# re-attach the final row
			densdata_ip = np.array([
				(1. - j / subdiv) * self.densdata[:-1, :] + (j / subdiv) * self.densdata[1:, :]
				for j in range(0, subdiv)])
			densdata_ip = np.concatenate((np.hstack(densdata_ip.transpose(1, 2, 0)).transpose(), self.densdata[-1:, :]), axis=0)
		else:
			densdata_ip = self.densdata

		if do_ldos:
			# DOS as discrete derivative of the IDOS along the energy axis
			ldos_data = np.diff(densdata_ip, axis = 1) / np.diff(self.ee)
		else:
			ldos_data = None

		ee_results = []
		ldos_results = []
		for idosval in idos:
			# Solve n(E0) = idosval for each magnetic field value separately
			ee_result = [energy_at_idos(idosval, self.ee, idos_data, suppress_warning = True) for idos_data in densdata_ip]
			ee_results.append(np.array(ee_result, dtype = float))
			if do_ldos:
				ee_data1 = 0.5 * (self.ee[1:] + self.ee[:-1])
				ldos_result = [float("nan") if ee != ee else idos_at_energy(ee, ee_data1, ldos, suppress_warning = True) for ee, ldos in zip(ee_result, ldos_data)]
				# Actually, abuse of notation, because we use idos_at_energy with DOS instead of IDOS
				# ee != ee detects "nan"
				ldos_results.append(np.array(ldos_result, dtype = float))

		# TODO: Define a new container class for return value
		return np.array(idos), np.array(ee_results), np.array(ldos_results) if do_ldos else None

	def offset(self, e_cnp = None, n_offset = None, inplace = True):
		"""Shift by energy or density offset.

		Arguments:
		e_cnp          Float. Position of the charge neutrality point.
		n_offset       Float. Density offset (shifts charge neutrality point).
		inplace        True or False. If True, return the same DensityData
		               instance with modified values. If False, return a new
		               instance.

		Returns:
		densitydata    DensityData instance. Either this instance or a new one.
		"""
		if e_cnp is None and n_offset is None:  # do nothing
			return self
		if e_cnp is not None and n_offset is not None:
			raise ValueError("Arguments e_cnp and n_offset may not be set at the same time.")

		# Shift using energy or density input, respectively
		if e_cnp is not None:
			# (unreachable warning: the both-set case raises ValueError above)
			if n_offset is not None:
				sys.stderr.write("Warning (DensityData.offset): With argument e_cnp being set, argument n_offset is ignored.\n")
			n_offset = self.idos_at_energy(e_cnp)  # Density offset
			if n_offset is None:
				sys.stderr.write("ERROR (DensityData.offset): Requested charge neutrality out of range or undefined (energy input).\n")
				return self
			idos_new = self.densdata - n_offset
		elif n_offset is not None:
			idos_new = self.densdata - n_offset
			e_cnp = self.energy_at_idos(n_offset)
			if e_cnp is None:
				sys.stderr.write("Warning (DensityData.offset): Requested charge neutrality out of range or undefined (density input).\n")
				return self
		else:
			raise ValueError("The values for e_cnp and n_offset must not be both None")

		# Adapt or invalidate special energies
		special_energies_new = {}
		if 'ef0' in self.special_energies:
			special_energies_new['ef0b'] = self.special_energies['ef0']
		if e_cnp is not None:
			special_energies_new['ef0'] = e_cnp
		else:
			sys.stderr.write("Warning (DensityData.offset): Cannot adapt special energy ef because energy shift cannot be calculated.\n")
		if 'e0' in self.special_energies:
			special_energies_new['e0'] = self.special_energies['e0']

		# All other special energies no longer refer to the shifted data
		invalidated_keys = [key for key, val in self.special_energies.items() if key not in ['ef0', 'ef0b', 'e0'] and val is not None]
		if len(invalidated_keys) == 1:
			sys.stderr.write("Warning (DensityData.offset): Special energy %s is invalidated by energy/density shift.\n" % invalidated_keys[0])
		elif len(invalidated_keys) > 1:
			sys.stderr.write("Warning (DensityData.offset): Special energies %s are invalidated by energy/density shift.\n" % ", ".join(invalidated_keys))

		obj = self if inplace else self.copy()
		obj.densdata = idos_new
		obj.special_energies = special_energies_new
		return obj

	def pushforward(self, other, values):
		"""Push forward by another density function.
		Let 'self' define f(E) and 'other' define g(E), then return f @ g^-1,
		where @ denotes composition. The typical use case would be to extract an
		integrated observable as function of density n. For this purpose, define
		'self' as the integrated observable as function of E [O = f(E)] and
		'other' as the IDOS	as function of E as other [n = g(E)]. Then this
		method returns O(n) = f(g^-1(n)) = (f @ g^-1)(n).

		Arguments:
		other    DensityData instance that defines the function g(E).
		values   Number or array. Value(s) at which the pushforward function
		         should be evaluated.

		Returns:
		pushfwd  Numpy array with shape (len(xval), len(values)).
		"""
		if not isinstance(other, DensityData):
			raise TypeError("Argument other must be a DensityData instance")
		# NOTE(review): if exactly one of self.xval, other.xval is None, the
		# shape comparison below raises AttributeError rather than a clear
		# error — confirm whether that case can occur.
		if self.xval is None and other.xval is None:
			pass
		elif self.xval.shape != other.xval.shape:
			raise ValueError("x values do not match (shape)")
		elif not all(xs == xo for xs, xo in zip(self.xval, other.xval)):
			raise ValueError("x values do not match (values)")

		if self.ee.shape == other.ee.shape and np.amax(np.abs(self.ee - other.ee)) < 1e-10:
			# Equal energy values
			# The for iteration is over x values
			pushfwd = np.array([np.interp(values, gE, fE) for fE, gE in zip(self.densdata, other.densdata)])
		else:
			# Unequal energy values: If self defines f(E_i) and other g(E'_j)
			# with different sets of energies. This requires an intermediate
			# step to calculate g(E_i).
			pushfwd = []
			for fE, gEp in zip(self.densdata, other.densdata):  # iterate over x values
				gE = np.interp(self.ee, other.ee, gEp)  # find g(E_i) from g(E'_j)
				pushfwd.append(np.interp(values, gE, fE))
		return np.asarray(pushfwd)

	def print_verbose(self):
		"""Verbose / debug output"""
		print("DensityData attributes:")
		# List all non-callable, non-dunder attributes with a compact summary
		# (string/number value, array shape, or container length)
		all_att = [att for att in dir(self) if not att.startswith('__')]
		for att in all_att:
			val = getattr(self, att)
			if not callable(val):
				print("", att, type(val), val if isinstance(val, str) else str(val) if isinstance(val, (bool, int, float)) else val.shape if isinstance(val, (np.ndarray, VectorGrid)) else len(val) if isinstance(val, (list, tuple)) else '')
+
+
+class IntegratedObservable(DensityData):
+	"""Thin wrapper around DensityData class"""
+	def __init__(self, ee, xval, densdata = None, obs = None, **kwds):
+		super().__init__(ee, xval, densdata = densdata, int_obs = True, **kwds)
+		if obs is None:
+			raise ValueError("Argument obs must be set")
+		self.obs = obs
+
+class DensityDataByBand(DensityData):
+	"""Storage container for DensityData separated by band
+
+	The data per band is stored in the attribute densdata_dict (type dict), and
+	densdata holds the sum of these values.	Most member functions simply act on
+	densdata (the total value), but have a counterpart that iterates over
+	densdata_dict.
+	"""
+	def __init__(self, ee, xval, densdata = None, **kwds):
+		if not (isinstance(densdata, dict) and all(isinstance(v, np.ndarray) for v in densdata.values())):
+			raise TypeError("Argument densdata must be a dict of numpy arrays.")
+		densdata_total = sum(d for d in densdata.values() if d is not None)
+		super().__init__(ee, xval, densdata = densdata_total, **kwds)
+		self.densdata_dict = {
+			b: DensityData(ee, xval, densdata=d, **kwds) for b, d in densdata.items() if d is not None
+		}
+
+	def copy(self):
+		"""Return a shallow copy of the present instance"""
+		return DensityDataByBand(
+			self.ee, self.xval, dx = None, densdata = self.densdata_dict,
+			kdim = self.kdim, ll = self.ll, validity_range = self.validity_range,
+			scale = self.scale, special_energies = self.special_energies,
+			aligned_with_e0 = self.aligned_with_e0)
+
+	def get_dos_dict(self, *args, **kwds):
+		"""Iterator around DensityData.get_dos()"""
+		if not self.aligned_with_e0 and self.strategy_no_e0 == 'strict':
+			return None
+		return {b: d.get_dos(*args, **kwds) for b, d in self.densdata_dict.items()}
+
+	def get_idos_dict(self, *args, **kwds):
+		"""Iterator around DensityData.get_idos()"""
+		if not self.aligned_with_e0 and self.strategy_no_e0 in ['strict', 'dos']:
+			return None
+		return {b: d.get_idos(*args, **kwds) for b, d in self.densdata_dict.items()}
+
+	def get_numeric_dos_ll_dict(self, *args, **kwds):
+		"""Iterator around DensityData.get_numeric_dos_ll()"""
+		return {b: d.get_numeric_dos_ll(*args, **kwds) for b, d in self.densdata_dict.items()}
+
+	def get_validity_range_dict(self):
+		"""Iterator around DensityData.get_validity_range()"""
+		return {b: d.get_validity_range() for b, d in self.densdata_dict.items()}
+
+	def set_scale(self, scale, *args, **kwds):
+		"""Set scale for total and dict elements"""
+		super().set_scale(scale, *args, **kwds)
+		for d in self.densdata_dict.values():
+			d.set_scale(scale, *args, **kwds)
+		return self
+
+	def set_special_energies(self, **kwds):
+		"""Set special energies for total and dict elements"""
+		super().set_special_energies(**kwds)
+		for d in self.densdata_dict.values():
+			d.set_special_energies(**kwds)
+		return self
+
+	def offset(self, *args, **kwds):
+		"""Set offset for total and dict elements"""
+		super().offset(*args, **kwds)
+		for d in self.densdata_dict.values():
+			d.offset(*args, **kwds)
+		return self
+
+	def __getitem__(self, b):
+		return self.densdata_dict[b]
+
+	def values(self):
+		return self.densdata_dict.values()
+
+	def items(self):
+		return self.densdata_dict.items()
+
+	def __iter__(self):
+		return iter(self.densdata_dict)
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/densityscale.py b/kdotpy-v1.0.0/src/kdotpy/density/densityscale.py
new file mode 100644
index 0000000000000000000000000000000000000000..67cd4fca6582c6308d6bd7e80d147fcb0c4514e2
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/densityscale.py
@@ -0,0 +1,187 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+from ..phystext import format_unit
+
+# Automatic scaling for DOS, IDOS, etc.
+# Aliases for density quantities. Values are the canonical one-letter codes
+# accepted by DensityScale: 'k' (momentum-space volume), 's' (states),
+# 'p' (particle/carrier density), 'e' (charge density).
+qty_alias = {
+	'momentum': 'k',
+	'states': 's',
+	'dos': 's',
+	'particles': 'p',
+	'carriers': 'p',
+	'n': 'p',
+	'cardens': 'p',
+	'charge': 'e'
+}
+
+class DensityScale:
+	def __init__(self, inputvalues, qty, unit, *, autoscale = True, scaledinput = False, kdim = 2, ll = False):
+		if qty in ['k', 's', 'p', 'e'] or qty is None:
+			self.qty = qty
+		elif qty in qty_alias:
+			self.qty = qty_alias[qty]
+		else:
+			raise ValueError("Invalid value for argument qty")
+		if unit in ['nm', 'cm', 'm']:
+			self.unit = unit
+		else:
+			raise ValueError("Invalid value for argument unit")
+		if kdim not in [0, 1, 2, 3]:
+			raise ValueError("Invalid value for argument kdim")
+		self.kdim = kdim
+		self.ll = ll
+		if self.ll and self.kdim != 2:
+			sys.stderr.write("Warning (DensityScale): If ll is set to True, argument kdim is set to 2.\n")
+			self.kdim = 2
+		self.values = inputvalues
+		try:
+			self.minvalue = np.nanmin(inputvalues)
+			self.maxvalue = np.nanmax(inputvalues)
+		except:
+			raise ValueError("Input values are not numeric")
+
+		self.mult = 1.0
+		if self.qty == 'k':
+			self.mult *= (2.0 * np.pi)**self.kdim
+		if self.unit == 'cm':
+			self.mult *= 1e7**self.kdim
+		elif self.unit == 'm':
+			self.mult *= 1e9**self.kdim
+
+		maxabs = max(abs(self.minvalue), abs(self.maxvalue))
+		self.exp = 0
+		if qty is not None and scaledinput:
+			if self.unit == "nm" and maxabs > 1000:  # interpret input as cm^-d
+				self.minvalue /= 1e7**self.kdim
+				self.maxvalue /= 1e7**self.kdim
+				maxabs /= 1e7**self.kdim
+			elif self.unit == "cm" and maxabs < 1000:  # interpret input as nm^-d
+				self.minvalue *= 1e7**self.kdim
+				self.maxvalue *= 1e7**self.kdim
+				maxabs *= 1e7**self.kdim
+			elif self.unit == 'm' and maxabs < 1000:  # interpret input as nm^-d
+				self.minvalue *= 1e9**self.kdim
+				self.maxvalue *= 1e9**self.kdim
+				maxabs *= 1e9**self.kdim
+		if qty is not None and autoscale:
+			if np.isnan(self.maxvalue):
+				sys.stderr.write("Warning (DensityScale): Autoscale ignored because there are no numerical values.\n")
+				return
+			if not scaledinput:
+				maxabs *= self.mult
+			logmax = -10 if maxabs < 1e-10 else 0 + int(np.floor(np.log10(maxabs)))
+			if logmax >= 3 or logmax <= -3:
+				self.exp = logmax
+				self.mult /= 10**self.exp
+		if scaledinput:
+			self.scaledmin = self.minvalue / 10**self.exp
+			self.scaledmax = self.maxvalue / 10**self.exp
+		else:
+			self.scaledmin = self.minvalue * self.mult
+			self.scaledmax = self.maxvalue * self.mult
+
+	def scaledvalues(self, values = None):
+		if values is None:
+			return self.values * self.mult
+		elif isinstance(values, list):
+			return list(np.array(values) * self.mult)
+		else:
+			return values * self.mult
+
+	def unitstr(self, style = 'raw', integrated = True, negexp = False):
+		if self.qty is None:  # TODO: Should this be allowed
+			return ""
+		# Charge factor: 'e' (electron charge) or 0 (denotes 10^0 = 1)
+		qfac = 'e' if self.qty == 'e' else 0
+		lunit = self.unit  # length unit
+		if integrated:
+			ustr = format_unit(self.exp, qfac, (lunit, -self.kdim), style = style, negexp = negexp)
+		else:
+			ustr = format_unit(self.exp, qfac, (lunit, -self.kdim), ('meV', -1), style = style, negexp = negexp)
+		return ustr
+
+	def qstr(self, style = 'raw', integrated = True):
+		if self.qty is None:  # TODO: should this be allowed?
+			return ""
+		elif self.kdim == 0:
+			if integrated:
+				return {'none': None, 'false': None, 'raw': 'LIDOS', 'plain': 'n', 'unicode': 'n', 'tex': r"$n$"}[style]
+			else:
+				return {'none': None, 'false': None, 'raw': 'LDOS', 'plain': 'dn/dE', 'unicode': 'dn/dE', 'tex': r"$dn/dE$"}[style]
+		elif self.qty == 'k':  # volume in k space
+			if style == 'tex':
+				qstr = ['N', r'l_\mathrm{k}', r'A_\mathrm{k}', r'V_\mathrm{k}'][self.kdim]
+				return ("$%s$" % qstr) if integrated else ("$d%s/dE$" % qstr)
+			elif style in ['unicode', 'plain']:
+				qstr = ['N', 'l_k', 'A_k', 'V_k'][self.kdim]
+				return qstr if integrated else ("d%s/dE" % qstr)
+			elif style == 'raw':
+				return 'IDOS_k' if integrated else 'DOS_k'
+			else:
+				return None
+		elif self.qty == 'k':  # volume in k space
+			if integrated:
+				return {'none': None, 'false': None, 'raw': 'IDOS', 'plain': 'IDOS', 'unicode': 'IDOS', 'tex': r'$\mathrm{IDOS}'}[style]
+			else:
+				return {'none': None, 'false': None, 'raw': 'DOS', 'plain': 'DOS', 'unicode': 'DOS', 'tex': r'$\mathrm{DOS}'}[style]
+		elif self.qty == 'p':  # particle/carrier density
+			if integrated:
+				return {'none': None, 'false': None, 'raw': 'n', 'plain': 'n', 'unicode': 'n', 'tex': r"$n$"}[style]
+			else:
+				return {'none': None, 'false': None, 'raw': 'dn/dE', 'plain': 'dn/dE', 'unicode': 'dn/dE', 'tex': r"$dn/dE$"}[style]
+		elif self.qty == 'e':  # charge density
+			if style == 'tex':
+				qstr = ['q', r'\lambda', r'\sigma', r'\rho'][self.kdim]
+				return ("$%s$" % qstr) if integrated else ("$d%s/dE$" % qstr)
+			elif style == 'unicode':
+				qstr = ['q', '\u03bb', '\u03c3', '\u03c1'][self.kdim]
+				return qstr if integrated else ("d%s/dE" % qstr)
+			elif style == 'plain':
+				qstr = ['q', 'lambda', 'sigma', 'rho'][self.kdim]
+				return qstr if integrated else ("d%s/dE" % qstr)
+			elif style == 'raw':
+				return 'IDOS' if integrated else 'DOS'
+			else:
+				return None
+		else:
+			return "IDOS" if integrated else "DOS"
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/densityz.py b/kdotpy-v1.0.0/src/kdotpy/density/densityz.py
new file mode 100644
index 0000000000000000000000000000000000000000..d784909cbd0a899dfc46a0c5ae34d7d2ee3966ef
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/densityz.py
@@ -0,0 +1,373 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+
+import numpy as np
+
+from .base import int_dos_by_band, int_dos_by_band_ll
+from .densitydata import idos_at_energy
+from ..config import get_config
+from ..erange import get_erange
+from ..momentum import VectorGrid, Vector
+from ..types import DiagData
+
+### DENSITY AS FUNCTION OF Z ###
+def densityz_energy(
+		data: DiagData, erange, nz: int, norb: int = 8, dz: float = 1.,
+		broadening = None, electrons=False, holes=False):
+	"""Carrier density as function of z and energy calculated from integrated DOS.
+
+	Note:
+	The result is a proper density of states in units of e / nm^3. In contrast
+	to earlier versions of this function, we multiply the result from
+	int_dos_by_band(), which is a volume in 2-dimensional momentum space, by
+	1 / (2 pi)^2.
+
+	Arguments:
+	data         DiagData instance. Eigenvectors (d.eivec) must be available.
+	erange       Tuple of 3 floats or array. Energy range (min, max, res) or
+	             energy values in meV.
+	nz, norb     Integers. Number of z values and number of orbitals,
+	             respectively. These values are needed to shape the arrays to
+	             the appropriate size.
+	dz           Float. Resolution (step size) of z coordinate in nm.
+	broadening   Broadening parameter.
+	electrons, holes   Booleans. At least one of them must be True.
+
+	Returns:
+	idos         Numpy array. The particle density as function of z and E.
+	             None on error (missing eigenvectors).
+	"""
+	if not electrons and not holes:
+		raise ValueError("The arguments electrons and holes may not be both False")
+
+	ee = get_erange(erange)
+	b_indices = data.get_all_bindex()
+	nk = len(data.get_momentum_grid())
+
+	# get psi2z and store it in dictionary, keyed by band index;
+	# each entry has shape (nk, nz)
+	psi2z = {b: np.zeros((nk, nz)) for b in b_indices}
+	for i, d in enumerate(data):  # loop through k points
+		if d.eivec is None:
+			sys.stderr.write("ERROR (densityz_energy): Eigenvectors are unavailable.\n")
+			return None
+		# NOTE(review): assumes the order of d.get_ubindex() matches the
+		# columns of d.eivec — confirm against DiagData.
+		for b, ev in zip(d.get_ubindex(), d.eivec.T):
+			# loop through b indices
+			# calculate psi^2; division by dz yields a density per length
+			psi2b = np.real(ev.conjugate() * ev) / dz
+			# move orbital into own dimension and sum over it
+			# (assumes eigenvector layout (nz, norb) — TODO confirm)
+			psi2bz = np.sum(psi2b.reshape(nz, norb), axis = 1)
+			# write into dict
+			psi2z[b][i, :] = psi2bz
+
+	# Calculate integrated DOS weighted with psi2z; the factor 1/(2 pi)^2
+	# converts the 2D momentum-space volume to a density (see Note above)
+	idos = int_dos_by_band(
+		data, ee, broadening = broadening, psi2z = psi2z,
+		electrons = electrons, holes = holes,
+	) / (2. * np.pi)**2
+	return idos
+
+
+def densityz(data: DiagData, target_energy: float, erange, nz: int, broadening = None, **kwds):
+	"""Carrier density as function of z calculated from integrated DOS.
+
+	This function is a wrapper around densityz_energy(); see also the comments
+	there.
+
+	Note:
+	The result is a proper density of states in units of e / nm^3. In contrast
+	to earlier versions of density_energy(), we multiply the result from
+	int_dos_by_band(), which is a volume in 2-dimensional momentum space, by
+	1 / (2 pi)^2.
+
+	Arguments:
+	data            DiagData instance.
+	target_energy   Float. Energy value at which the density should be evaluated.
+	erange          Tuple of 3 floats or array. Energy range (min, max, res) or
+	                energy values in meV.
+	nz              Integer. Number of z values, needed to shape the array
+	                to the appropriate size.
+	broadening      Broadening parameter.
+	**kwds          Further arguments passed to densityz_energy().
+
+	Returns:
+	pdensz          Numpy array. The particle density at each z value.
+	                None if target_energy lies at/below the energy range or if
+	                densityz_energy() fails.
+	"""
+
+	# Only apply optimization if no broadening is requested: shrink the energy
+	# range to the two grid points bracketing target_energy, so that
+	# densityz_energy() only evaluates what is needed for the interpolation.
+	if broadening is None:
+		ee = get_erange(erange)
+		# np.argmax returns the first index with ee > target_energy, or 0 if
+		# there is none. NOTE(review): upper_index == 0 also occurs when
+		# target_energy lies below ee[0]; both cases return None here.
+		upper_index = np.argmax(ee > target_energy)
+		if upper_index > 0:
+			# NOTE(review): erange[2] assumes erange is a (min, max, res)
+			# tuple; if an array of energies was passed, erange[2] is just the
+			# third energy value — confirm callers / get_erange() contract.
+			erange = (ee[upper_index-1], ee[upper_index], erange[2])
+		else:
+			return None
+	ee = get_erange(erange)
+
+	# Get IDOS as function of z and E (energy)
+	idos = densityz_energy(data, erange, nz = nz, broadening=broadening, **kwds)
+	if idos is None:
+		return None
+
+	# Get interpolated idos value for each z-point
+	pdensz = np.array([idos_at_energy(target_energy, ee, x) for x in idos])
+
+	# Return particle density
+	return pdensz
+
+
+def densityz_ll(
+		data: DiagData, target_energy, erange, nz: int, norb: int = 8, dz: float = 1.,
+		broadening = None, electrons=False, holes=False, offset_vol = None,
+		assume_sorted_aligned = False):
+	"""Carrier density as function of z calculated from integrated DOS.
+
+	The integration is done over the inverse area of one LL. This is effectively
+	an integration over the Brillouin zone.
+
+	Arguments:
+	data            DiagData instance.
+	target_energy   List of float. Energy value at which the density will be
+	                evaluated; one value per magnetic-field point.
+	erange          Tuple of 3 floats or array. Energy range (min, max, res) or
+	                energy values in meV.
+	nz, norb        Integers. Number of z values and number of orbitals,
+	                respectively. These values are needed to shape the arrays to
+	                the appropriate size.
+	dz              Float. Resolution (step size) of z coordinate in nm.
+	broadening      Broadening parameter.
+	offset_vol      Add this offset density to the result. For use with the full-diag
+	                SC implementation.
+	assume_sorted_aligned   Passed through to int_dos_by_band_ll().
+
+	Returns:
+	pdensz          Numpy array. The particle density at each z value.
+	"""
+	if not electrons and not holes:
+		raise ValueError("The arguments electrons and holes may not be both False")
+
+	ee = get_erange(erange)
+	b_indices = data.get_all_bindex()
+	bval = data.get_paramval()
+
+	# Erange optimization
+	# Only apply optimization if no broadening is requested
+	if broadening is None:
+		# First index above each target energy; 0 where no energy exceeds it.
+		upper_index = np.argmax(ee[:,np.newaxis] > target_energy, axis=0)
+		# NOTE(review): if all entries of upper_index are 0, np.min over the
+		# empty nonzero selection raises ValueError — confirm callers always
+		# provide an erange that covers the target energies.
+		# NOTE(review): erange[2] assumes a (min, max, res) tuple — confirm.
+		erange = (ee[np.min(upper_index[np.nonzero(upper_index)])-1], ee[np.max(upper_index)], erange[2])
+		ee = get_erange(erange)
+
+	# Extract the magnetic-field (z-component) values from the parameter grid
+	if isinstance(bval, VectorGrid):
+		bzval = bval.get_values('bz')
+	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], Vector):
+		bzval = [b.z() for b in bval]
+	elif isinstance(bval, list) and len(bval) > 0 and isinstance(bval[0], (float, np.floating, int, np.integer)):
+		bzval = bval
+	else:
+		raise TypeError("Invalid values for bval")
+	nB = len(bzval)
+
+	# get psi2z and store it in dictionary, keyed by band index (or
+	# (llindex, bindex) tuple); each entry has shape (nB, nz)
+	psi2z = {b: np.zeros((nB, nz)) for b in b_indices}
+	for i, d in enumerate(data):
+		# loop through k points; take different output for k-/B-dependence into account
+		bidx = list(zip(d.llindex, d.bindex)) if isinstance(b_indices[0], tuple) else d.get_ubindex()
+		for b, ev in zip(bidx, d.eivec.T):
+			# loop through b indices
+			# calculate psi^2
+			psi2b = np.real(ev.conjugate() * ev) / dz
+			# move orbital (and LLindex) into own dimension and sum over it; axial approximation
+			if isinstance(b, tuple):
+				# axial approximation
+				psi2bz = np.sum(psi2b.reshape(nz, norb), axis = 1)
+			else:
+				# noax
+				psi2bz = np.sum(psi2b.reshape(-1, nz, norb), axis=(0, 2))
+			# write into dict
+			psi2z[b][i, :] = psi2bz
+
+	# calculate idos (with new function, please check!)
+	idos = int_dos_by_band_ll(
+		data, ee, broadening=broadening, electrons=electrons, holes=holes,
+		psi2z=psi2z, assume_sorted_aligned=assume_sorted_aligned
+	)
+
+	# Apply broadening; redundant because already done in int_dos_by_band_ll() ??
+	# if 'verbose' in sys.argv:
+	# 	print("int_dos_by_band: call idos_broadening", broadening)
+	# idos = idos_broadening(idos, ee, broadening=broadening)
+
+	# Get idos value for each z-point; one target energy per field value
+	pdensz = np.array(
+		[[idos_at_energy(te_b, ee, idos_b_z, suppress_warning=True) for idos_b_z in idos_b] for te_b, idos_b in zip(target_energy, idos)],
+		dtype=float)
+
+	if offset_vol is not None:
+		pdensz += offset_vol[:,np.newaxis]
+
+	# replace nans with zero; ToDo: handle NaNs differently?
+	np.nan_to_num(pdensz, copy=False)
+
+	# Set densities which couldn't be calculated to first density that could be calculated
+	pdensz_zero = np.all(pdensz == 0, axis=1)
+	if np.count_nonzero(pdensz_zero) < len(pdensz):
+		# only replace zero pdensz if at least one pdensz is non-zero
+		# otherwise do nothing
+		# first_pdensz = pdensz[np.where(np.invert(pdensz_zero))[0][0]]
+		# pdensz[pdensz_zero] = first_pdensz
+		first_idx = np.where(np.invert(pdensz_zero))[0][0]
+		pdensz[pdensz_zero] = pdensz[first_idx]
+		sys.stderr.write(f"Warning (densityz_ll): Replaced all pdensz for magnetic fields <{bzval[first_idx]:.3f}T with the one from {bzval[first_idx]:.3f}T.\n")
+
+	# Return particle density
+	return pdensz
+
+
+def densityz_surface_states(params, n_surf, d_surf = 8.0, smoothing = 0.0):
+	"""Simulated background density rho(z) at surfaces, net charge neutral
+
+	Apply a uniform density near the surfaces of the well layer and compensate
+	in the bulk such that the total density is zero.
+
+	Arguments:
+	params     PhysParams instance
+	n_surf     Number or 2-tuple. If numeric, apply this surface density (in
+	           nm^-2) to both bottom and top surface in the well layer. If a
+	           2-tuple, apply two different densities to bottom and top layer,
+	           respectively. If one of the two values is None, that respective
+	           surface is not considered, i.e., the bulk extends completely to
+	           the interface of the well layer. The value (None, None) is not
+	           permitted.
+	d_surf     Number. Thickness of the surface layer(s) in nm.
+	smoothing  Number >= 0. The amount of smoothing at the edges of each region.
+	           The value is the characteristic width in nm of the density
+	           function at the edge, of the form tanh((z - z_i) / smoothing).
+
+	Returns:
+	densz   Numpy array of dimension 1. The background density as function of z.
+	"""
+	if isinstance(n_surf, (float, int, np.floating, np.integer)):
+		n_bot, n_top = n_surf, n_surf
+	elif isinstance(n_surf, tuple) and len(n_surf) == 2:
+		n_bot, n_top = n_surf
+	else:
+		raise TypeError("Argument n_surf must be a number or a 2-tuple")
+	if n_bot is None and n_top is None:
+		raise ValueError("Argument n_surf cannot be (None, None)")
+
+	n_bulk = -n_top if n_bot is None else -n_bot if n_top is None else -(n_top + n_bot)
+
+	zval = params.zvalues_nm()
+	i_bot, i_top = params.well_z()
+	z_bot = zval[i_bot]
+	z_top = zval[i_top]
+	z_bot_inner = z_bot if n_bot is None else z_bot + d_surf
+	z_top_inner = z_top if n_top is None else z_top - d_surf
+	d_bulk = z_top_inner - z_bot_inner
+	if d_bulk <= 0:
+		raise ValueError("Argument d_bulk must be a positive number")
+
+	if smoothing < 0.0:
+		raise ValueError("Argument smoothing must be >= 0")
+
+	densz = np.zeros_like(zval)
+	rho_bulk = n_bulk / d_bulk
+	if smoothing > 0.0:
+		densz += 0.5 * (np.tanh((zval - z_bot_inner) / smoothing) - np.tanh((zval - z_top_inner) / smoothing)) * rho_bulk
+		if n_bot is not None:
+			rho_bot = n_bot / d_surf
+			densz += 0.5 * (np.tanh((zval - z_bot) / smoothing) - np.tanh((zval - z_bot_inner) / smoothing)) * rho_bot
+		if n_top is not None:
+			rho_top = n_top / d_surf
+			if smoothing > 0.0:
+				densz += 0.5 * (np.tanh((zval - z_top_inner) / smoothing) - np.tanh((zval - z_top) / smoothing)) * rho_top
+		# No compensation for numerical accuracies needed. However, these can
+		# occur still, if an interface is close to the edge of the zval array.
+	else:
+		densz[(zval > z_bot_inner) & (zval < z_top_inner)] = rho_bulk
+		if n_bot is not None:
+			rho_bot = n_bot / d_surf
+			densz[(zval > z_bot) & (zval < z_bot_inner)] = rho_bot
+			densz[np.abs(zval - z_bot) < 1e-6] = rho_bot / 2
+			densz[np.abs(zval - z_bot_inner) < 1e-6] = (rho_bot + rho_bulk) / 2
+		if n_top is not None:
+			rho_top = n_top / d_surf
+			densz[(zval < z_top) & (zval > z_top_inner)] = rho_top
+			densz[np.abs(zval - z_top) < 1e-6] = rho_top / 2
+			densz[np.abs(zval - z_top_inner) < 1e-6] = (rho_top + rho_bulk) / 2
+
+		# Correct numerical inaccuracies (e.g., when the value of d_surf is
+		# incommensurate with the lattice of z coordinates.
+		dens = densz.sum()
+		densn = np.count_nonzero(((zval < z_top) & (zval > z_bot)))
+		densz[(zval < z_top) & (zval > z_bot)] -= dens / densn
+
+	return densz
+
+def print_densityz(params, densz, cardens = None):
+	"""Print summary of density as function of z
+
+	Arguments:
+	params        PhysParams instance. Only the z resolution (zres) is used.
+	densz         dict instance. We extract the values for the keys 'e', 'h',
+	              and 'bg'. If a value is None or a key is absent, it is
+	              ignored. (The key 'total' is not read by this function.)
+	cardens       Numerical value or None. If set, add a line with the requested
+	              carrier density at the bottom.
+
+	No return value.
+	"""
+	dz = params.zres
+	density_unit = get_config('dos_unit')
+	# Unit label matching the factor 1000 applied to the sums below
+	ustr = "* 10^18 e/m^2" if density_unit == 'm' else "* 10^11 e/cm^2" if density_unit == 'cm' else "* 10^-3 e/nm^2"
+
+	print("Densities:")
+	densz_e = densz.get('e')
+	densz_h = densz.get('h')
+	densz_bg = densz.get('bg')
+	# NOTE(review): densz_e is presumably stored with a negative sign (electron
+	# charge), hence the sign flip for printing n — confirm against callers.
+	if densz_e is not None:
+		print("Electrons (e):    n = %8.3f %s" % (-np.sum(densz_e) * dz * 1000, ustr))
+	if densz_h is not None:
+		print("Holes     (h):    p = %8.3f %s" % (np.sum(densz_h) * dz * 1000, ustr))
+	if densz_e is not None and densz_h is not None:
+		print("Together     :  p-n = %8.3f %s" % ((np.sum(densz_h) + np.sum(densz_e)) * dz * 1000, ustr))
+	if densz_bg is not None:
+		print("Background   : n_BG = %8.3f %s" % (np.sum(densz_bg) * dz * 1000, ustr))
+	if densz_e is not None and densz_h is not None and densz_bg is not None:
+		print("Total        :        %8.3f %s" % ((np.sum(densz_h) + np.sum(densz_e) + np.sum(densz_bg)) * dz * 1000, ustr))
+
+	if cardens is not None:
+		# NOTE(review): same sign convention as for electrons above — confirm.
+		print("Requested    :        %8.3f %s" % (-cardens * 1000, ustr))
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/elements.py b/kdotpy-v1.0.0/src/kdotpy/density/elements.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9f8de3a26951f0e57dc8541683dcfefb22c1e08
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/elements.py
@@ -0,0 +1,463 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+### HELPER FUNCTIONS ###
+def midpoints(x):
+	"""Return intermediate points of a 1-, 2-, or 3-dimensional array by interpolation"""
+	if x.ndim == 1:
+		return 0.5 * (x[1:] + x[:-1])
+	elif x.ndim == 2:
+		return 0.25 * (x[1:, 1:] + x[1:, :-1] + x[:-1, 1:] + x[:-1, :-1])
+	elif x.ndim == 3:
+		return 0.125 * (
+			x[1:, 1:, 1:] + x[1:, :-1, 1:] + x[:-1, 1:, 1:] + x[:-1, :-1, 1:]
+			+ x[1:, 1:, :-1] + x[1:, :-1, :-1] + x[:-1, 1:, :-1] + x[:-1, :-1, :-1]
+		)
+	else:
+		raise ValueError("Argument x must be an array of dimension 1, 2, or 3")
+
+def elementary_triangles():
+	"""Define four elementary triangles spanned by two vertices of the square and the origin.
+
+	Since the data points are arranged on a two-dimensional grid, consider
+	square where the four vertices are neighbouring data points. The four
+	elementary triangles are formed by taking two neighouring vertices and the
+	center point (0, 0) of the square (not included explicitly). If we label the
+	vertices as follows:
+	3   4
+	  5
+	1   2
+	the elementary triangles are 1 2 5, 1 3 5, 3 4 5, and 2 4 5.
+	"""
+	triangles = [
+		((1, 1), (-1, 1)),  # 1 2 5
+		((1, 1), (1, -1)),  # 1 3 5
+		((1, -1), (-1, -1)),  # 3 4 5
+		((-1, 1), (-1, -1))  # 2 4 5
+	]
+	return triangles
+
+def elementary_tetrahedra(which = 1):
+	"""Define 12 elementary tetrahedra spanned by three vertices of the cube [-1, 1]^3 and the origin.
+
+	Since the data points are arranged on a three-dimensional grid, consider
+	cubes where the eight vertices are neighbouring data points. For each face
+	of the cube, take two points diagonally. Then form two tetrahedra by using
+	these two points, one of the two other points of this face [function tet()],
+	and the center point (0, 0, 0) of the cube (not included explicitly).
+
+	Argument:
+	which   1 or -1. From which set of four vertices "the diagonal points" are
+	        taken.
+
+	Returns:
+	alltetrahedra   A list of 12 elements, for which each element is 3-tuple of
+	                vertex coordinates. Each of the vertex coordinates is also a
+	                3-tuple.
+	"""
+	if which not in [1, -1]:
+		raise ValueError("Argument 'which' has to be 1 or -1.")
+	vertices1 = [(x, y, z) for z in [-1, 1] for y in [-1, 1] for x in [-1, 1] if x * y * z == 1]
+	vertices2 = [(x, y, z) for z in [-1, 1] for y in [-1, 1] for x in [-1, 1] if x * y * z == -1]
+	vertexpairs1 = [(vertices1[j1], vertices1[j2]) for j1 in range(0, 3) for j2 in range(j1 + 1, 4)]
+	def tet(vp):
+		a = np.asarray(vp)
+		f = (a[0] + a[1]) / 2
+		points3 = [v2 for v2 in vertices2 if np.dot(f, v2) == 1]
+		return [(tuple(a[0]), tuple(a[1]), tuple(p3)) for p3 in points3]
+	return [t for vp in vertexpairs1 for t in tet(vp)]
+
+## To translate value 1 to slice '1:' and value -1 to slice ':-1'
+to_slice = [None, np.s_[1:], np.s_[:-1]]
+def values_over_simplex(arr, simplex, arr_mid=None):
+	"""Get values in all corresponding vertices of the simplex
+
+	The array d.o.f. (three axes) is the implicit iteration over all
+	squares/cubes in the data grid. The slicing is done, so that we get the
+	indices 0, ..., n - 2 for vertex coordinate = -1 and 1, ..., n - 1 for
+	vertex coordinate = 1.
+
+	Argument:
+	arr      Numpy array. The values at the vertices.
+	simplex  Tuple of tuples. Each member represents an elementary triangle or
+	         tetrahedron, i.e., the list elements that elementary_triangles() or
+	         elementary_tetrahedra() produce. The length of the inner tuple must
+	         be equal to arr.ndim. The length of the outer tuple is the number
+	         of vertices nv in the simplex minus 1.
+	arr_mid  Numpy array or None. The values at the mid points. If None, it is
+	         calculated internally, but it may be precalculated to gain a
+	         (little) speed bonus when this function is called repeatedly.
+
+	Returns:
+	arr_spx  Numpy array. The first arr.ndim axes iterate over the grid. The
+	         last axis is the iteration over the nv vertices of each simplex.
+	"""
+	# Array values at the vertex points
+	slices = [tuple(to_slice[x] for x in vertex) for vertex in simplex]
+	arr_v = [arr[s] for s in slices]
+	# Add the array value at the mid point and return
+	if arr_mid is None:
+		arr_mid = midpoints(arr)
+	return np.stack([*arr_v, arr_mid], axis=-1)
+
+def interpolate2d(x, dims, weights=None):
+	"""Interpolate squared wavefunction for 2d k-grid by calculating (weighted) mean value over corners of grid squares in k space.
+	Pattern (same as in int_dos_by_band() in densitybase.py):
+	3   4
+	1   2
+
+	Arguments:
+	x			Numpy array with shape (nkx * nky, nz). The 2d k-grid is stacked
+	            in the first axis.
+	dims		Tuple. Dimension size of k-grid (nkx, nky). Used for reshaping.
+	weights		Iterable. Weight factors for each corner. First element for
+	            corner 1, second element for corner 2, and so on. If omitted,
+	            weights = [1, 1, 1, 1].
+
+	Returns:
+	x_interpolated	Numpy array of shape ((nkx - 1) * (nky - 1), nz)
+	"""
+	if weights is None:
+		weights = np.array([1, 1, 1, 1])
+
+	initial_shape = x.shape
+	x_reshaped = x.reshape(dims + initial_shape[1:])
+
+	x_interpolated = 1 / np.sum(np.asarray(weights)) * (
+		weights[0] * x_reshaped[:-1, :-1] + weights[1] * x_reshaped[:-1, 1:]
+		+ weights[2] * x_reshaped[1:, :-1] + weights[3] * x_reshaped[1:, 1:]
+	)
+
+	return x_interpolated.reshape(((dims[0] - 1) * (dims[1] - 1),) + initial_shape[1:])
+
+### IDOS AND AREA/VOLUME ELEMENTS ###
+
+def linear_idos_element(z, zval, holes = False):
+	"""Calculate DOS elements from function data (1D)
+
+	The result gives the sizes of the overlaps between the intervals
+	(z[i, 0], z[i, 1]) and the grid elements defined by zval.
+	The result can be used the integrand of an integral over k. For calculation
+	of such integral, it is needed to multiply the integrand by the appropriate
+	volume elements ('dk').
+
+	Arguments:
+	z      Numpy array of shape (n_z, 2), where nz may be arbitrary. These are
+	       typically function values like (E[k_i], E[k_{i+1}]) that defines the
+	       curve between two points in the dispersion, for a single band.
+	zval   Numpy array of shape (n_zval,), where n_zval may be arbitrary. This
+	       is the grid for which the overlaps are calculated. This is typically
+	       the energy range.
+	holes  True or False. If False, count electrons, i.e., from 0 to 1. If True,
+	       count holes, i.e., from -1 to 0.
+
+	Returns:
+	lz     Numpy array of shape (n_z, n_zval).
+	"""
+	z = np.sort(z, axis = -1)  # reorder along last axis
+
+	z0 = z[:, 0]
+	z1 = z[:, 1]
+	z10 = z1 - z0
+	lz = np.divide(zval[np.newaxis, :] - z0[:, np.newaxis], z10[:, np.newaxis], where = (z10[:, np.newaxis] != 0.0))
+
+	# avoid divisions by zero
+	with np.errstate(invalid='ignore'):
+		# comparison with NaN may pass silently here; we take care of them later
+		cond0 = zval[np.newaxis, :] < z0[:, np.newaxis]
+		cond1 = zval[np.newaxis, :] < z1[:, np.newaxis]
+	lz = np.where(cond0, np.zeros_like(lz), np.where(cond1, lz, np.ones_like(lz)))
+
+	# count holes oppositely
+	if holes:
+		lz -= 1.0
+
+	# deal with NaN values; set these to zero
+	condnan = np.isnan(z0) | np.isnan(z1)
+	lz[condnan, :] = 0.0
+	return lz
+
+def triangle_idos_element(z, zval, holes = False):
+	"""Calculate DOS elements from triangulation data (2D)
+
+	Given a triangle in three dimensions, consider its intersection with the
+	interval [z1, z2]. Project the result to the (x, y) plane and calculate the
+	area of the result. This function iterates over triangles defined by
+	(z[i, 0], z[i, 1], z[i, 2]) and returns the fractions of the said area with
+	respect to the projected area of the full triangle.
+	The result can be used the integrand of an integral over (kx, ky). For
+	calculation	of such integral, it is needed to multiply the integrand by the
+	appropriate	volume elements ('dkx dky').
+
+	Arguments:
+	z      Numpy array of shape (nx, ny, 3), where nz may be arbitrary. These
+	       are typically function values like (E[k_1], E[k_2], E[k_3]) that
+	       define a triangulation of the dispersion E(kx, ky) for a single band.
+	zval   Numpy array of shape (n_zval,), where n_zval may be arbitrary. This
+	       is the grid for which the overlaps are calculated. This is typically
+	       the energy range.
+	holes  True or False. If False, count electrons, i.e., from 0 to 1. If True,
+	       count holes, i.e., from -1 to 0.
+
+	Returns:
+	lz     Numpy array of shape (nx * ny, n_zval). Note that the first two
+	       indices of the input array z are flattened.
+	"""
+	# order by z value
+	z = np.sort(z, axis = -1)  # reorder along last axis
+
+	z0 = z[:, :, 0].flatten()
+	z1 = z[:, :, 1].flatten()
+	z2 = z[:, :, 2].flatten()
+	z10 = z1 - z0
+	z20 = z2 - z0
+	z21 = z2 - z1
+	lz1 = np.divide(
+		(zval[np.newaxis, :] - z0[:, np.newaxis])**2,
+		(z10[:, np.newaxis] * z20[:, np.newaxis]),
+		where = (z10[:, np.newaxis] * z20[:, np.newaxis] != 0.0)
+	)  # avoid divisions by zero
+	lz2 = 1.0 - np.divide(
+		(zval[np.newaxis, :] - z2[:, np.newaxis])**2,
+		(z21[:, np.newaxis] * z20[:, np.newaxis]),
+		where = (z21[:, np.newaxis] * z20[:, np.newaxis] != 0.0)
+	)  # avoid divisions by zero
+
+	with np.errstate(invalid='ignore'):
+		# comparison with NaN may pass silently here; we take care of them later
+		cond0 = zval[np.newaxis, :] < z0[:, np.newaxis]
+		cond1 = zval[np.newaxis, :] < z1[:, np.newaxis]
+		cond2 = zval[np.newaxis, :] < z2[:, np.newaxis]
+	lz = np.where(cond0, np.zeros_like(lz1), np.where(cond1, lz1, np.where(cond2, lz2, np.ones_like(lz1))))
+
+	# count holes oppositely
+	if holes:
+		lz -= 1.0
+
+	# deal with NaN values; set these to zero
+	condnan = np.isnan(z0) | np.isnan(z1) | np.isnan(z2)
+	lz[condnan, :] = 0.0
+	return lz
+
+def triangle_area_element(x, y, polar = False):
+	"""Calculate volume elements from triangulation data.
+
+	This calculates the volume (base area, i.e., area of the projection to the
+	x, y plane) of the triangle defined by the vertices (x[i, j, k], y[i, j, k])
+	where k = 0, 1, 2 iterates over the three vertices and i, j iterate over
+	the data array (typically kx, ky). The result encodes the 'dA' ('dkx dky' or
+	'r dr dphi') in the definition of the integral.
+
+	Arguments:
+	x, y    Two numpy arrays of must be of shape (nx, ny, 3)
+	polar   True or False. If True, treat x, y as r, phi and calculate the area
+	        of the 'triangle' in these coordinates (dA = r dr dphi). If False,
+	        use cartesian coordinates (dA = dkx dky)
+
+	Returns:
+	area    Numpy array of shape (nx * ny, n_zval). Note that the first two
+	        indices of the input arrays x, y are flattened.
+	"""
+	x1 = (x[:, :, 1] - x[:, :, 0]).flatten()
+	x2 = (x[:, :, 2] - x[:, :, 0]).flatten()
+	y1 = (y[:, :, 1] - y[:, :, 0]).flatten()
+	y2 = (y[:, :, 2] - y[:, :, 0]).flatten()
+	area = 0.5 * np.abs(x1 * y2 - x2 * y1)  # A (triangle area)
+
+	if polar:
+		xavg = np.sum(x, axis = -1).flatten() / 3.
+		return area * xavg  # A * (r[0] + r[1] + r[2]) / 3
+	else:
+		return area  # A
+
+def tetrahedral_idos_element(f, fval, holes = False):
+	"""Calculate DOS elements from triangulation data (3D)
+
+	Equivalent of triangle_idos_element for three dimensional input.
+	This function returns the volume of the set given by fval < f(x, y, z)
+	intersected with an elementary tetrahedron given by x > 0, y > 0, z > 0,
+	x + y + z < 1. The function values at the vertices of the tetrahedra are
+	defined by (f[i, 0], f[i, 1], f[i, 2], f[i, 3]).
+	The result can be used as the integrand of an integral over (kx, ky, kz).
+	For calculation of such integral, it is needed to multiply the integrand by
+	the appropriate volume elements ('dkx dky dkz').
+
+	Arguments:
+	f      Numpy array of shape (nx, ny, nz, 4), where nx, ny, nz may be
+	       arbitrary. These are typically function values like
+	       (E[k_1], E[k_2], E[k_3], E[k_4]) that define a triangulation of the
+	       dispersion E(kx, ky, kz) for a single band.
+	fval   Numpy array of shape (n_fval,), where n_fval may be arbitrary. This
+	       is the grid for which the overlaps are calculated. This is typically
+	       the energy range.
+	holes  True or False. If False, count electrons, i.e., from 0 to 1. If True,
+	       count holes, i.e., from -1 to 0.
+
+	Returns:
+	lf     Numpy array of shape (nx * ny * nz, n_fval). Note that the first
+	       three indices of the input array f are flattened.
+	"""
+	# order by f value
+	f = np.sort(f, axis = -1)  # reorder along last axis
+	nf = f.shape[0] * f.shape[1] * f.shape[2]
+
+	# Do test if all values of f are outside the bounds of the range of fval
+	# If so, the calculation is trivial.
+	fmin = np.nanmin(f)
+	fmax = np.nanmax(f)
+	fvalmin = np.amin(fval)
+	fvalmax = np.amax(fval)
+	nfval = len(fval)
+	if fvalmax < fmin:  # range fval below all values of f; -1 for h, 0 for e
+		return -np.ones(shape = (nf, nfval)) if holes else np.zeros(shape = (nf, nfval))
+	if fvalmin > fmax:  # range fval above all values of f; 0 for h, 1 for e
+		return np.zeros(shape = (nf, nfval)) if holes else np.ones(shape = (nf, nfval))
+
+	AX = np.newaxis  # shorthand
+	# Sorted vertex values f0 <= f1 <= f2 <= f3 and their pairwise differences
+	f0 = f[:, :, :, 0].flatten()
+	f1 = f[:, :, :, 1].flatten()
+	f2 = f[:, :, :, 2].flatten()
+	f3 = f[:, :, :, 3].flatten()
+	f10 = (f1 - f0)[:, AX]
+	f20 = (f2 - f0)[:, AX]
+	f30 = (f3 - f0)[:, AX]
+	f21 = (f2 - f1)[:, AX]
+	f31 = (f3 - f1)[:, AX]
+	f32 = (f3 - f2)[:, AX]
+	f20_f30 = f20 * f30
+	f10_f20_f30 = f10 * f20_f30
+	f10_f21_f31 = f10 * f21 * f31
+	f30_f31_f32 = f30 * f31 * f32
+
+	# lf1: cubic growth for f0 <= fval < f1
+	lf1 = np.divide(
+		(fval[AX, :] - f0[:, AX])**3, f10_f20_f30,
+		where = (f10_f20_f30 != 0.0)
+	)
+	# lf2a/lf2b: middle region f1 <= fval < f2; lf2a handles the degenerate
+	# case f1 == f2 (where the denominator of lf2b vanishes), lf2b the generic
+	# case
+	f2f_f2f0 = np.divide(f2[:, AX] - fval[AX, :], f20, where = (f20 != 0.0))
+	f3f_f3f0 = np.divide(f3[:, AX] - fval[AX, :], f30, where = (f30 != 0.0))
+	lf2a = np.divide(
+		(fval[AX, :] - f0[:, AX])**2, f20_f30,
+		where = (f20_f30 != 0.0)
+	) * (1 + f2f_f2f0 + f3f_f3f0)
+	lf2b = lf1 - np.divide(
+		(fval[AX, :] - f1[:, AX])**3, f10_f21_f31,
+		where = (f10_f21_f31 != 0.0)
+	)
+	lf2 = np.where(f21 == 0.0, lf2a, lf2b)
+	# lf3: cubic saturation towards 1 for f2 <= fval < f3
+	lf3 = 1.0 - np.divide(
+		(f3[:, AX] - fval[AX, :])**3, f30_f31_f32,
+		where = (f30_f31_f32 != 0.0)
+	)
+	# avoid divisions by zero
+	# (entries masked by the 'where' arguments above are never selected by the
+	# condition chain below)
+
+	with np.errstate(invalid='ignore'):
+		# comparison with NaN may pass silently here; we take care of them later
+		cond0 = fval[AX, :] < f0[:, AX]
+		cond1 = fval[AX, :] < f1[:, AX]
+		cond2 = fval[AX, :] < f2[:, AX]
+		cond3 = fval[AX, :] < f3[:, AX]
+
+	# Note the property f0 <= f1 <= f2 <= f3, so that we have a chain of subset
+	# relations: cond0 ⊆ cond1 ⊆ cond2 ⊆ cond3. Thus, setting the array as below
+	# is equivalent to
+	# where(cond0, 0, where(cond1, lf1, where(cond2, lf2, where(cond3, lf3, 1))))
+	lf = np.ones_like(lf1)
+	lf[cond3] = lf3[cond3]
+	lf[cond2] = lf2[cond2]
+	lf[cond1] = lf1[cond1]
+	lf[cond0] = 0.0
+
+	# count holes oppositely
+	if holes:
+		lf -= 1.0
+
+	# deal with NaN values; set these to zero
+	condnan = np.isnan(f0) | np.isnan(f1) | np.isnan(f2) | np.isnan(f3)
+	lf[condnan, :] = 0.0
+	return lf
+
+def tetrahedral_volume_element(x, y, z, spherical = False, cylindrical = False):
+	"""Calculate volume elements from triangulation data (3D).
+
+	This calculates the volume of the tetrahedron defined by the vertices
+	(x[i, j, k, l], y[i, j, k, l], z[i, j, k, l])
+	where l = 0, 1, 2, 3 iterates over the four vertices and i, j, k iterate
+	over the data array (typically kx, ky, kz). The result encodes the 'dA'
+	('dkx dky dkz', 'r dr dphi dz' or 'r^2 sin(theta) dr dphi dtheta') in the
+	definition of the integral.
+
+	Arguments:
+	x, y, z      Three numpy arrays of must be of shape (nx, ny, 4)
+	spherical    True or False. If True, treat x, y, z as r, theta, phi and
+	             calculate the volume of the tetrahedron in spherical
+	             coordinates (dA = r^2 sin(theta) dr dtheta dphi).
+	cylindrical  True or False. If True, treat x, y, z as r, phi, z and
+	             calculate the volume of the tetrahedron in cylindrical
+	             coordinates (dA = r dr dphi dz).
+
+	Returns:
+	area    Numpy array of shape (nx * ny * nz, n_zval). Note that the first two
+	        indices of the input arrays x, y, z are flattened.
+	"""
+	x1 = (x[:, :, :, 1] - x[:, :, :, 0]).flatten()
+	x2 = (x[:, :, :, 2] - x[:, :, :, 0]).flatten()
+	x3 = (x[:, :, :, 3] - x[:, :, :, 0]).flatten()
+	y1 = (y[:, :, :, 1] - y[:, :, :, 0]).flatten()
+	y2 = (y[:, :, :, 2] - y[:, :, :, 0]).flatten()
+	y3 = (y[:, :, :, 3] - y[:, :, :, 0]).flatten()
+	z1 = (z[:, :, :, 1] - z[:, :, :, 0]).flatten()
+	z2 = (z[:, :, :, 2] - z[:, :, :, 0]).flatten()
+	z3 = (z[:, :, :, 3] - z[:, :, :, 0]).flatten()
+	vol = np.abs(x1 * y2 * z3 + x2 * y3 * z1 + x3 * y1 * z2 - x2 * y1 * z3 - x1 * y3 * z2 - x3 * y2 * z1) / 6  # V (tetrahedral volume, det(X Y Z) / 6)
+
+	if cylindrical:  # (r, phi, z)
+		if spherical:
+			raise ValueError("Arguments cylindrical and spherical may not both be True.")
+		ravg = np.sum(x, axis = -1).flatten() / 4.
+		return vol * ravg  # V * (r[0] + r[1] + r[2] + r[3]) / 4
+	elif spherical:  # (r, theta, phi)
+		r2avg = np.sum(x**2, axis = -1).flatten() / 4.  # r^2
+		sintheta_avg = np.sum(np.sin(y), axis = -1).flatten() / 4.  # sin(theta)
+		return vol * r2avg * sintheta_avg
+	else:  # cartesian
+		return vol  # V
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/density/intobs.py b/kdotpy-v1.0.0/src/kdotpy/density/intobs.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c6d3679657806d018caaed740d96166a87c7449
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/density/intobs.py
@@ -0,0 +1,159 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+
+from .base import int_obsval
+from .broadening import BroadeningFunction, MultiBroadening, idos_broadening
+from .densitydata import data_interpolate_for_ldos, IntegratedObservable
+from ..erange import get_erange
+from ..momentum import VectorGrid
+
+### INTEGRATED OBSERVABLES ###
+def integrated_observable(eidata, obs, erange, params, broadening = None, local = False, min_res = None, split_pm = False):
+	"""Calculate integrated observable
+
+	Arguments:
+	eidata      DiagData instance.
+	obs         String or integer. Observable name or index.
+	erange      Tuple of 3 floats or array. Energy range (min, max, res) or
+	            energy values in meV.
+	params      PhysParams instance. The physical parameters.
+	broadening  Broadening parameter.
+	local       True or False. If True, calculate local integrated observable.
+		        If False, integrate over the x values
+	min_res     Minimal resolution; interpolate if necessary
+	split_pm    True or False. If True, calculate the integrated observable for
+	            + and - bands separately. The separation is done via selection
+	            of bands which have band character label + or - at zero k or B.
+
+	Returns:
+	io     IntegratedObservable instance. If split_pm is True, a tuple of two of
+	       them.
+	"""
+	if 'verbose' in sys.argv:
+		print('integrated_observable: broadening', broadening)
+
+	## Energy values
+	ee = get_erange(erange)
+
+	# interpolate if necessary
+	if min_res is not None:
+		eidata_ip = data_interpolate_for_ldos(eidata, min_res, obs = True)
+	else:
+		eidata_ip = eidata
+
+	if isinstance(broadening, (BroadeningFunction, MultiBroadening)):
+		broadening_ip = broadening.interpolate_width(len(eidata_ip))
+	elif broadening is None:
+		broadening_ip = broadening
+	else:
+		raise TypeError("Argument broadening must be a BroadeningFunction instance or None")
+
+	# Get zero point; detect LL mode automatically; test for band indices
+	eidata0 = eidata_ip.get_zero_point()
+	if eidata0 is None:
+		sys.stderr.write("Warning (integrated_observable): Cannot find zero point. Use base point instead.\n")
+		eidata0 = eidata_ip.get_base_point()
+	ll = eidata0.llindex is not None
+	if eidata0.bindex is None:
+		sys.stderr.write("ERROR (integrated_observable): No band indices given. Calculation of integrated observable failed.\n")
+		return None
+
+	# Get VectorGrid; define x values and integration elements da
+	vgrid = eidata_ip.grid
+	if not isinstance(vgrid, VectorGrid):
+		raise TypeError("VectorGrid expected")
+	xval = vgrid if local else None
+	if eidata_ip.get_paramval() is not None:
+		da = [1.0 for d in eidata_ip]
+	else:
+		da = vgrid.integration_element(full = True)
+	if da is None:
+		sys.stderr.write("ERROR (integrated_observable): Cannot determine integration elements. Calculation of integrated observable failed.\n")
+		return None
+
+	if split_pm:
+		# Find band indices for bands with + and - characters
+		bindex_p = [b for b, c in zip(eidata0.bindex, eidata0.char) if c.endswith('+')]
+		bindex_m = [b for b, c in zip(eidata0.bindex, eidata0.char) if c.endswith('-')]
+
+		# Find missing bands and show a warning message with useful information if needed
+		bindex_all = eidata_ip.get_all_bindex()
+		bindex_missing = [b for b in bindex_all if b not in bindex_p and b not in bindex_m]
+		if len(bindex_missing) > 0:
+			bindex_missing_e = [b for b in bindex_missing if b > 0]
+			bindex_missing_h = [b for b in bindex_missing if b < 0]
+			sys.stderr.write("Warning (integrated_observable): For integrated observable %s with +/- split, %i bands were not considered" % (obs, len(bindex_missing)))
+			bindex_missing_str = []
+			if len(bindex_missing_e) > 0:
+				bindex_missing_str.append("b >= %i" % min(bindex_missing_e))
+			if len(bindex_missing_h) > 0:
+				bindex_missing_str.append("b <= %i" % max(bindex_missing_h))
+			if len(bindex_missing_str) > 0:
+				sys.stderr.write(": " + " and ".join(bindex_missing_str))
+			sys.stderr.write(".\n")
+
+		# Calculate integrated observable for + and - bands
+		int_obs_p = int_obsval(eidata_ip, obs, ee, da, electrons = True, holes = True, local = local, sel_bindex = bindex_p)
+		int_obs_m = int_obsval(eidata_ip, obs, ee, da, electrons = True, holes = True, local = local, sel_bindex = bindex_m)
+
+		# Apply broadening
+		if broadening_ip is not None:
+			if 'verbose' in sys.argv:
+				print("integrated_observable: call idos_broadening x2", broadening)
+			int_obs_p = idos_broadening(int_obs_p, ee, broadening = broadening_ip)
+			int_obs_m = idos_broadening(int_obs_m, ee, broadening = broadening_ip)
+
+		# Return data structures of class IntegratedObservable
+		io_p = IntegratedObservable(ee, xval, obs = obs, densdata = int_obs_p, kdim = params.kdim, ll = ll, aligned_with_e0=eidata.aligned_with_e0)
+		io_m = IntegratedObservable(ee, xval, obs = obs, densdata = int_obs_m, kdim = params.kdim, ll = ll, aligned_with_e0=eidata.aligned_with_e0)
+		return io_p, io_m
+	else:
+		# Calculate integrated observable for all bands
+		int_obs = int_obsval(eidata_ip, obs, ee, da, electrons = True, holes = True, local = local)
+		# Apply broadening
+		if broadening is not None:
+			if 'verbose' in sys.argv:
+				print("integrated_observable: call idos_broadening", broadening)
+			int_obs = idos_broadening(int_obs, ee, broadening = broadening_ip)
+		# Return data structure of class IntegratedObservable
+		io = IntegratedObservable(ee, xval, obs = obs, densdata = int_obs, kdim = params.kdim, ll = ll, aligned_with_e0=eidata.aligned_with_e0)
+		return io
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/__init__.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0753db1b8d9e883abd838f2f8faca965fbaf50f
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/__init__.py
@@ -0,0 +1,41 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .diagdata import DiagData, DiagDataPoint
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagdata.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagdata.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ee088c00cad215d9073fcd64eec5805f68eb436
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagdata.py
@@ -0,0 +1,2870 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
import copy
from os import environ
import gc
# Set before numpy is imported so the limit takes effect in its OpenMP-backed
# routines (presumably to avoid thread oversubscription with the process-level
# parallelism from ..parallel — TODO confirm).
environ['OMP_NUM_THREADS'] = '1'
import sys
from hashlib import md5

import numpy as np

from .. import types
from ..observables import observables, ObservableList
from ..momentum import Vector, VectorGrid, VectorTransformation, get_vectortransformation
from ..parallel import Progress, dict_plus_array_dict
from ..tasks import TaskManager
from ..config import get_config
from .. import hdf5o

from .stitch import stitch

# Global
# Module-wide flag; presumably set once a zero-point warning has been printed,
# so the warning is not repeated — confirm against usage elsewhere in module.
zero_point_warning_issued = False
# DiagDataPoint attribute names used for binary-file I/O (per the name;
# confirm against the hdf5o usage).
binfile_ddp_fields = ['eival', 'eivec', 'llindex', 'bindex']
+
def isnumber(x):
	"""Return True if x is a scalar numeric type (Python or NumPy), else False."""
	# np.complexfloating covers all numpy complex scalar types; the former
	# alias np.complex_ was removed in NumPy 2.0, so using it raises
	# AttributeError on modern NumPy.
	return isinstance(x, (float, int, complex, np.floating, np.integer, np.complexfloating))
def isnumbernone(x):
	"""Return True if x is None or a scalar numeric type (Python or NumPy)."""
	# np.complexfloating replaces np.complex_, which was removed in NumPy 2.0.
	return x is None or isinstance(x, (float, int, complex, np.floating, np.integer, np.complexfloating))
+
def array_none_to_nan(ls):
	"""From a list which contains None mixed with other values, create an appropriate array with NaNs on the positions of the Nones.
	This works for 1D and 2D arrays.
	"""
	if all(x is not None for x in ls):  # fast path: nothing to substitute
		return np.asarray(ls)
	# Determine the element width from the first non-None entry:
	# 0 means scalar entries (1D result), > 0 means row entries (2D result).
	width = None
	for x in ls:
		if isinstance(x, (np.ndarray, list)):
			width = len(x)
			break
		if x is not None:
			width = 0
			break
	if width is None:
		return None  # the input contained only Nones
	nan_fill = float("nan") if width == 0 else [float("nan")] * width
	return np.array([nan_fill if x is None else x for x in ls])
+
# Raised by DiagDataPoint.stitch_with() when the new eigenvalues fall entirely
# inside the range of the already-present ones.
class SubsetError(ValueError):
	"""Merge error: New values are subset of present ones."""
+
# Raised by DiagDataPoint.stitch_with() when the new eigenvalue range does not
# overlap the present one, so the solutions cannot be stitched reliably.
class NoOverlapError(ValueError):
	"""Merge error: New values do not overlap with present ones."""
+
class DiagDataPoint(types.DiagDataPoint):
	"""Container class for eigenvalue and eigenstate properties for a single k or B point.

	Attributes:
	k             Float or Vector instance. Momentum value.
	paramval      None, float or Vector instance. 'Parameter' value, currently
	              used only as magnetic field value.
	neig          Integer. Number of eigenvalues stored at this point.
	dim           Integer. Dimensionality, i.e., the size of the eigenvectors.
	eival         Numpy array of length neig. The eigenvalues in meV.
	eivec         Numpy array of shape (dim, neig). The eigenvectors belonging
	              to the eigenvalues. May be set to None to save memory
	              consumption.
	obsids        List of strings. These contain the observable ids for the
	              observables that are stored in obsvals.
	obsvals       Numpy array of shape (nobs, neig), where nobs = len(obsids).
	              The array contains the observable values for the eigenstates.
	bindex        Numpy array of length neig with integer elements. The band
	              indices belonging to the eigenstates.
	llindex       Numpy array of length neig with integer elements, or None. The
	              Landau-level indices belonging to the eigenstates.
	char          Numpy array of length neig with string elements, or None. The
	              band characters of the eigenstates.
	transitions   TransitionsData instance or None. Stores data for optical
	              transitions. See transitions.py.
	wffigure      Integer, string, or matplotlib Figure object. Identifier for
	              a matplotlib figure. None is used for absence of a figure. The
	              value is a list if separate figures are made for each state.
	current_step  Integer. Progress for this instance in the calculation model.
	ham           Sparse matrix (or tuple of matrices) for Hamiltonian(s)
	              evaluated for this instance's parameters.
	grid_index    Integer. Position of this instance in the flattened
	              VectorGrid. Used for priority ordering during diagonalization.
	tuple_index   Dict instance. Stores a mapping of band indices to array
	              indices.
	"""
+	def __init__(self, k, eival = None, eivec = None, paramval = None, grid_index = None, opts = None):
+		if isinstance(k, Vector):
+			self.k = k
+		else:
+			self.k = Vector(k)
+		if eival is None:
+			eival = np.ndarray((0, ))
+		self.eival = eival
+		self.neig = len(eival)
+		if eivec is None:
+			self.eivec = None
+			self.dim = None
+		elif isinstance(eivec, np.matrix):  # We explicitly forbid numpy.matrix
+			raise TypeError("eivec must be an array or None")
+		elif isinstance(eivec, np.ndarray):
+			if eivec.shape[1] == self.neig:
+				self.eivec = eivec
+			elif eivec.shape[0] == self.neig:
+				self.eivec = eivec.T
+			else:
+				raise ValueError("Array eivec has size %s, but %s expected." % (eivec.shape, self.neig))
+			self.dim = self.eivec.shape[0]
+		else:
+			raise TypeError("eivec must be an array or None")
+		self.obsvals = None
+		self._obsids = None
+		self.bindex = None
+		self.llindex = None
+		self.aligned_with_e0 = False
+		self.char = None
+		self.transitions = None
+		self.wffigure = None
+		if paramval is None:
+			self.paramval = None
+		elif isinstance(paramval, (int, np.integer, float, np.floating)):
+			self.paramval = paramval
+		else:
+			self.paramval = paramval
+		self.current_step = None
+		self.ham = None
+		self.grid_index = grid_index
+		self.tuple_index = None
+		if isinstance(opts, dict):
+			self.opts = opts
+		elif opts is None:
+			self.opts = dict()
+		else:
+			raise TypeError("DDP specific opts must be a dict or None")
+		# TODO: dict
+
	@property
	def obsids(self):
		"""Return a shallow copy of the observable ids (list of strings), or None.

		A copy is returned so callers cannot mutate the internal list."""
		return copy.copy(self._obsids)
+
+	def __str__(self):
+		"""Return human readable description."""
+		return "DDP at k %s" % self.k + (", B %s" % self.paramval) if self.paramval is not None else ""
+
+	def hash_id(self, length=6, precision='%.12e'):
+		"""Provides a stable hash value derived from k and paramval.
+
+		Arguments:
+		length		Length of the returned hexadecimal string (default: 6).
+		precision	Format specifier defining the precision of input string
+		            values for k and paramval (default: '%.12e')
+
+		Returns:
+		Hexadecimal hash string
+		"""
+		k_str = self.k.__str__(precision) if isinstance(self.k, Vector) else (precision % self.k)
+		p_str = "" if self.paramval is None else self.paramval.__str__(precision) if isinstance(self.paramval, Vector) \
+			else (precision % self.paramval)
+		return md5((k_str+p_str).encode()).hexdigest()[:length]
+
+	def file_id(self):
+		"""Provides a human readable id derived from k and paramval.
+
+		Use the default str() method (no optional arguments) for the type (being
+		either Vector or float).
+		"""
+		k_str = str(self.k).replace('(', '').replace(')', '').replace(',', '_').replace(' ', '')
+		if self.paramval is None or self.paramval == 0:
+			return k_str
+		p_str = str(self.paramval).replace('(', '').replace(')', '').replace(',', '_').replace(' ', '')
+		if self.k == 0:
+			return p_str + 'T'
+		else:
+			return k_str + '_' + p_str + 'T'
+
	def stitch_with(self, k, eival, eivec, targetenergy_old, targetenergy_new, inplace=False, accuracy=0.01):
		"""Stitch together multiple diagonalization solutions.
		Overlapping duplicate eigenvalues are recalculated by weighted mean.
		From duplicate eigenvectors, the one with the eigenvalue closer to its
		target value (higher weight) is chosen.

		Arguments:
		k, ... 	            See DiagDataPoint class info. The data that is
		                    added.
		targetenergy_old    Target energy used to calculate the already stored
		                    solutions. Used to calculate weights.
		targetenergy_new    Target energy used to calculate the added
		                    solutions. Used to calculate weights.
		inplace	            Replace eigenvectors of current DiagDataPoint
		                    instance if True, otherwise return a new instance
		                    (default).
		accuracy            Estimate of solver precision. Used to determine
		                    degeneracy of states.

		Returns:
		This DiagDataPoint instance (inplace=True) or a new one.

		Raises:
		SubsetError, if the new eigenvalues lie within the present range.
		NoOverlapError, if the two eigenvalue ranges do not overlap.
		ValueError, on mismatching momentum, empty input, or invalid eivec
		shape.

		Note:
		Currently only supports bare diagonalization results without
		observables, ll indices, etc. as those quantities are usually not yet
		calculated. This DiagDataPoint is expected to be sorted by eival.
		"""
		# TODO: Does not cover the case where one of the sets fills a hole in the other set
		# Tolerance (meV, presumably) for the subset test below — TODO confirm unit
		tol = 5e-2
		if k != self.k:
			raise ValueError("Cannot extend data point with data at other momentum")
		if len(eival) == 0:
			raise ValueError("Not enough eigenvalues for solution stitching")
		if eival.min() > (self.eival.min()-tol) and eival.max() < (self.eival.max()+tol):
			# TODO: Does not distinguish between subset and filling a hole
			raise SubsetError("New eivals are a subset of already present ones")
		# Exactly one of the present range boundaries must fall inside the new range
		if not ((eival.min() < self.eival.min() < eival.max()) ^ (eival.min() < self.eival.max() < eival.max())):  # XOR
			raise NoOverlapError("No overlap detected. Prev: %.4g to %.4g, New: %.4g to %.4g. Can not stitch solutions reliably" % (self.eival.min(), self.eival.max(), eival.min(), eival.max()))
		# Normalize eivec orientation to (dim, neig), i.e., states as columns
		if eivec.shape[1] == len(eival):
			eivec1 = eivec
		elif eivec.shape[0] == len(eival):
			eivec1 = eivec.T
		else:
			raise ValueError("Array eivec has size %s, but (-, %s) expected." % (eivec.shape, len(eival)))
		if eivec1.shape[0] != self.dim:
			raise ValueError("Array eivec has size %s, but (%s, -) expected." % (eivec.shape, self.dim))
		# Sort the new solutions by eigenvalue before stitching
		order = np.argsort(eival)
		eival1, eivec1 = eival[order], eivec1[:, order]
		# temp_eival = self.eival.copy()  # debug
		new_eival, new_eivec = stitch(
			self.eival, eival1, self.eivec, eivec1,
			targetenergy_old, targetenergy_new, accuracy=accuracy
		)
		if inplace:
			self.neig = len(new_eival)
			self.eivec = new_eivec
			self.eival = new_eival
			return self
		else:
			return DiagDataPoint(self.k, new_eival, new_eivec)
+
+	def update(self, new_ddp):
+		"""Update the current DiagDataPoint instance from another instance.
+		This is useful if the current instance is already linked to a DiagData
+		instance. Keeps the attributes 'grid_index' and 'current_step' from the
+		current instance if not set in the new instance.
+		"""
+		if isinstance(new_ddp, DiagDataPoint):
+			if new_ddp.current_step is None:
+				new_ddp.current_step = self.current_step
+			if new_ddp.grid_index is None:
+				new_ddp.grid_index = self.grid_index
+			self.__dict__.update(new_ddp.__dict__)
+		else:
+			raise ValueError("Can only update DiagDataPoint from another DiagDataPoint instance")
+
+	def extend_by(self, k, eival, eivec, paramval = None, obsvals = None, obsids = None, char = None, llindex = None, bindex = None, accuracy = 1e-6):
+		"""Extend DiagDataPoint with additional states; prevent duplicates.
+
+		Arguments:
+		k, ...   See DiagDataPoint class info. The data that is added.
+
+		Note:
+		Arguments k and paramval serve as a check that the momentum and
+		parameter value of the added data match that of the existing data. If
+		not, an error is raised.
+		"""
+		if k != self.k:
+			raise ValueError("Cannot extend data point with data at other momentum")
+		if paramval is not None and self.paramval is not None and abs(paramval - self.paramval) > 1e-6:
+			raise ValueError("Cannot extend data point with data at other paramval")
+		if (paramval is None and self.paramval is not None) or (paramval is not None and self.paramval is None):
+			raise ValueError("Cannot extend data point with data at other paramval")
+
+		# Invalidate cached tuple indices
+		self.tuple_index = None
+
+		# Determine new eigenvalues
+		if len(self.eival) > 0:
+			newsel = np.array([np.amin(np.abs(self.eival - e)) >= accuracy for e in eival])
+		else:
+			newsel = np.array([True for e in eival])
+		if np.count_nonzero(newsel) == 0:
+			return self
+
+		# Add eigenvalues
+		self.eival = np.concatenate((self.eival, np.asarray(eival)[newsel]))
+		newneig = len(eival)
+
+		# Add eigenvectors
+		if self.eivec is not None:
+			if eivec is None:
+				self.delete_eivec()
+			elif isinstance(eivec, np.ndarray):
+				if eivec.shape[1] == newneig:
+					eivec1 = eivec
+				elif eivec.shape[0] == newneig:
+					eivec1 = eivec.T
+				else:
+					raise ValueError("Array eivec has size %s, but (-, %s) expected." % (eivec.shape, len(eival)))
+				if eivec1.shape[0] != self.dim:
+					raise ValueError("Array eivec has size %s, but (%s, -) expected." % (eivec.shape, self.dim))
+				self.eivec = np.concatenate((self.eivec, eivec[:, newsel]), axis = 1)
+			else:
+				raise TypeError("Invalid type for eivec")
+
+		# Add char, llindex, bindex (TODO: Smarter way)
+		if self.char is not None:
+			if char is None:
+				self.char = None
+			else:
+				self.char = np.concatenate((self.char, np.asarray(char)[newsel]))
+		if self.llindex is not None:
+			if llindex is None:
+				self.llindex = None
+			else:
+				self.llindex = np.concatenate((self.llindex, np.asarray(llindex)[newsel]))
+		if self.bindex is not None:
+			if bindex is None:
+				self.bindex = None
+			else:
+				self.bindex = np.concatenate((self.bindex, np.asarray(bindex)[newsel]))
+
+		# Add observables
+		if self.obsvals is not None:
+			if obsvals is None or obsids is None:
+				self.obsvals = None
+				self._obsids = None
+			if obsvals.shape[1] != newneig:
+				raise ValueError("Array obsvals has incorrect size")
+			obsidx = np.array([oidx for oidx, oid in enumerate(self._obsids) if oid in obsids])
+			self.obsvals = self.obsvals[obsidx, :]
+			self._obsids = list(np.array(self._obsids)[obsidx])
+			obsidx = np.array([self._obsids.index(oid) for oid in obsids if oid in self._obsids])
+			self.obsvals = np.concatenate((self.obsvals, obsvals[obsidx, :][:, newsel]), axis = 1)
+
+		self.neig = len(self.eival)
+		return self
+
+	def extend(self, *args, **kwds):
+		"""Extend data point; deal with either a DiagDataPoint or separate arguments.
+
+		Argument:
+		*args      Either a DiagDataPoint or an argument list that is passed to
+		           self.extend_by().
+		**kwds     Keyword arguments passed to self.extend_by().
+
+		Note:
+		If the first argument is a DiagDataPoint, all following arguments and
+		keyword arguments are ignored.
+		"""
+		if len(args) == 1 and isinstance(args[0], DiagDataPoint):
+			return self.extend_by(args[0].k, args[0].eival, args[0].eivec, paramval = args[0].paramval, obsvals = args[0].obsvals, obsids = args[0].obsids, char = args[0].char, bindex = args[0].bindex, llindex = args[0].llindex)
+		else:
+			return self.extend_by(*args, **kwds)
+
+	def set_observables(self, obsvals, obsids = None):
+		"""Set observable values.
+
+		Argument:
+		obsvals   List or array. Observable values.
+		obsids    None, list or array. Set self._obsids to this value (also
+		          valid for None).
+		"""
+		if isinstance(self.obsvals, list):
+			obsvals = np.array(obsvals)
+		if obsvals.shape[1] == self.neig:
+			self.obsvals = obsvals
+		elif obsvals.shape[0] == self.neig:
+			self.obsvals = obsvals.T
+		else:
+			raise ValueError("Argument obsvals has invalid shape")
+		if obsids is None:
+			self._obsids = None
+		elif isinstance(obsids, (list, np.ndarray)):
+			if len(obsids) != self.obsvals.shape[0]:
+				raise ValueError("Argument obsids has invalid length")
+			self._obsids = [o for o in obsids]  # force copy
+		else:
+			raise TypeError("Invalid type for argument obsids")
+		return self
+
	def calculate_observables(self, params, obs, obs_prop = None, overlap_eivec = None, magn = None):
		"""Calculate observables.

		Arguments:
		params    PhysParams instance. Needed to calculate the observables, see
		          observables.py.
		obs       List of strings. The observables that will be calculated.
		obs_prop  ObservableList instance containing all observable properties.
		overlap_eivec  None or a dict instance, whose keys are the band labels
		               and values are the eigenvectors (numpy arrays). If set,
		               calculate overlap observables. If None, no overlap
		               observables are calculated.
		magn      Float, Vector instance, or None. If not None, the magnetic
		          field strength.

		Returns:
		This DiagDataPoint instance. If obs is None or empty, or eigenvectors
		are missing, the instance is returned unchanged.

		Note:
		Eigenvectors are required, i.e., self.eivec must not be None.
		"""
		if self.eivec is None:
			# Warn and return unchanged rather than raising
			sys.stderr.write("For calculation of observables, eigenvectors are necessary, but not present.\n")
			return self
		if obs is None or obs == []:
			return self
		else:
			self._obsids = [o for o in obs]  # force copy
			self.obsvals = observables(self.eivec, params, obs, llindex = self.llindex, overlap_eivec = overlap_eivec, magn = magn)
		# Rescale to dimensionful values when the observable properties demand it
		if isinstance(obs_prop, ObservableList) and obs_prop.dimful is True:
			for j, o in enumerate(self._obsids):
				omult = obs_prop[o].dimful_factor if o in obs_prop else 1.0
				if omult != 1.0:
					self.obsvals[j, :] *= omult
		return self
+
+	def add_observable(self, obsvals = None, obsid = None):
+		"""Add the values and id of an observable.
+
+		Arguments:
+		obsvals  Numpy array or None. If set, the observable values that will be
+		         added. This array must have length neig. If None, add "NaN"
+		         values.
+		obsid    String or None. If set, add this observable id. If None, add an
+		         empty string as observable id for the new observable.
+		"""
+		if self._obsids is not None:
+			self._obsids.append("" if obsid is None else obsid)
+		if obsvals is None:
+			obsvals = np.ones((1, self.neig), dtype = float) * float("nan")
+		elif isinstance(self.obsvals, list):
+			obsvals = np.array(obsvals)
+		self.obsvals = np.concatenate((self.obsvals, obsvals), axis = 0)
+
+	def delete_eivec(self):
+		"""Delete the eigenvector data"""
+		if self.eivec is not None:
+			del self.eivec
+			self.eivec = None
+		return self
+
+	def build_tuple_index_cache(self):
+		"""Build and store a dict instance which maps tuple indices to array indices."""
+		if self.bindex is not None and self.llindex is not None:
+			self.tuple_index = {}
+			for j, l, b in zip(range(self.neig), self.llindex, self.bindex):
+				self.tuple_index[(l, b)] = j
+		elif self.bindex is not None:
+			self.tuple_index = {}
+			for j, b in enumerate(self.bindex):
+				self.tuple_index[(b,)] = j
+		return self.tuple_index
+
+	# Some 'get' functions
	def get_index(self, val):
		"""Get index (position of eigenstate) in the data arrays.

		Argument:
		val   If an integer, return this value. If a float, return the index of
		      the nearest eigenvalue. If a string, return the index of the state
		      with this character label. If a 1-tuple, return the index of the
		      state with this band index. If a 2-tuple, return the index of the
		      state with this LL index and band index.

		Returns:
		Integer index (from 0 to neig-1) or None if there is no match.
		"""
		if self.neig == 0:
			return None
		if isinstance(val, (int, np.integer)):  # int: index
			# Note: the value is returned as-is, without a range check
			return val
		elif isinstance(val, (float, np.floating)):  # float: eigenvalue
			return np.argmin(np.abs(self.eival - val))
		elif isinstance(val, str):  # str: char
			if self.char is None:
				raise ValueError("Band characters are not defined")
			elif val not in self.char:
				return None
			else:
				# assumes self.char is a list (has .index); an ndarray would
				# fail here — TODO confirm the invariant
				return self.char.index(val)
		elif isinstance(val, tuple) and self.tuple_index is not None:
			# A built cache (see build_tuple_index_cache) takes precedence
			# over the explicit tuple searches below
			return self.tuple_index.get(val)
		elif isinstance(val, tuple) and len(val) == 1:
			if self.bindex is None:
				raise ValueError("Band indices are not defined")
			elif val[0] not in self.bindex:
				return None
			else:
				return list(self.bindex).index(val[0])
		elif isinstance(val, tuple) and len(val) == 2:
			if self.llindex is None:
				raise ValueError("LL indices are not defined")
			if self.bindex is None:
				raise ValueError("Band indices are not defined")
			else:
				# First state matching both the LL index and the band index
				sel = (self.llindex == val[0]) & (self.bindex == val[1])
				return None if np.count_nonzero(sel) == 0 else np.arange(0, self.neig)[sel][0]
		else:
			raise TypeError("Input value should be int, float, or str.")
+
+	def get_index_with_llindex(self, val, llindex):
+		"""Get index of state near energy with a specific LL index.
+
+		Arguments:
+		val      Float. Energy value.
+		llindex  Integer. The LL index to which the search is restricted.
+
+		Returns:
+		Integer index or None.
+		"""
+		if self.llindex is None:
+			raise ValueError("LL indices are not defined")
+		if not isinstance(val, float):
+			raise TypeError("Only possible with float value as input")
+		sel = (self.llindex == llindex)
+		if np.count_nonzero(sel) == 0:
+			return None
+		idx = np.arange(0, self.neig)[sel]  # restricted index array
+		eival = self.eival[sel]             # restricted eival array
+		return idx[np.argmin(np.abs(eival - val))]
+
+	def get_ubindex(self):
+		"""Get universal band index, i.e., an array of integers != 0 increasing in energy.
+		In absence of llindex, return the bindex. With llindex, take into
+		account the electrons and holes for all Landau levels.
+		"""
+		if self.bindex is None:
+			return None
+		if self.llindex is None:
+			return np.asarray(self.bindex)
+
+		# Sort by eigenvalue
+		o = np.argsort(self.eival)
+		bindex_sort = np.asarray(self.bindex)[o]
+
+		# Array of positive and negative band indices
+		pos = np.where(bindex_sort > 0, np.ones_like(bindex_sort), np.zeros_like(bindex_sort))
+		neg = 1 - pos
+		# Count positive indices for lower energies
+		# Count negative indices for higher energies
+		npos = np.cumsum(pos)
+		nneg = neg.sum() - np.cumsum(neg)
+
+		# Their difference. The neutral gap is between the states numbered 0 and 1.
+		ubindex = np.zeros_like(self.bindex)
+		ubindex[o] = npos - nneg
+		ubindex[ubindex <= 0] -= 1
+		return ubindex
+
+	def get_eival(self, val):
+		"""Look for state and return eigenvalue.
+
+		Argument:
+		val    Any value that self.get_index can handle. Specifically, if val is
+		       a float, then return the eigenvalue closest to that value.
+		"""
+		idx = self.get_index(val)
+		return None if idx is None else self.eival[idx]
+
+	def get_eival0(self):
+		"""Get energy of charge neutrality (using universal band indices)"""
+		ubindex = self.get_ubindex()
+		o = np.argsort(self.eival)
+		if ubindex is None:
+			raise ValueError("Band indices are not defined")
+
+		if ubindex.min() == 1:
+			return self.eival.min() - 0.001
+		elif ubindex.max() == -1:
+			return self.eival.max() + 0.001
+		else:
+			sel_p = (np.asarray(ubindex) > 0)
+			sel_m = (np.asarray(ubindex) < 0)
+			e_p = self.eival[sel_p].min()
+			e_m = self.eival[sel_m].max()
+			return 0.5 * (e_p + e_m)
+
+	def get_char(self, val):
+		"""Look for state and return band character"""
+		if self.char is None:
+			raise ValueError("Band characters are not defined")
+		idx = self.get_index(val)
+		return None if idx is None else self.char[idx]
+
	def get_all_char(self):
		"""Get all band characters.

		Empty labels are skipped; the label "??" marks a state with unknown
		character and triggers the warnings at the end.

		Returns:
		A dict, whose keys are the character labels and whose values are the
		(energy) eigenvalues.
		"""
		nochar_at = []  # energies of states with unknown character ("??")
		if self.char is None:
			raise ValueError("Band characters are not defined")
		all_char = {}
		if self.llindex is None:
			for e, c in zip(self.eival, self.char):
				if c == "":
					continue
				if c == "??":
					nochar_at.append(e)
				if c in all_char:
					# Duplicate label: keep the first energy, warn on mismatch
					if abs(e - all_char[c]) > 1e-6 and c != "??":
						sys.stderr.write("Warning (DiagDataPoint.get_all_char): Duplicate band character labels %s at different energies (%.3f and %.3f)\n" % (c, all_char[c], e))
				else:
					all_char[c] = e
		else:  # get unique band label at the lowest possible LL index
			# NOTE(review): all_char_llindex tracks the lowest LL index per
			# label but is not returned or stored — confirm intent
			all_char_llindex = {}
			for e, c, lln in zip(self.eival, self.char, self.llindex):
				if c == "":
					continue
				if c == "??":
					nochar_at.append(e)
				if c in all_char:
					if abs(e - all_char[c]) > 1e-6 and c != "??":
						sys.stderr.write("Warning (DiagDataPoint.get_all_char): Duplicate band character labels %s at different energies (%.3f and %.3f)\n" % (c, all_char[c], e))
					if lln < all_char_llindex[c]:
						all_char_llindex[c] = lln
				else:
					all_char[c] = e
					all_char_llindex[c] = lln

		# Warn about unknown characters: all states, one state, or several
		if len(nochar_at) == self.neig and self.neig > 0:
			sys.stderr.write("Warning (DiagDataPoint.get_all_char): Unknown band characters for all states.\n")
		elif len(nochar_at) == 1:
			sys.stderr.write("Warning (DiagDataPoint.get_all_char): Unknown band character at energy %.3f meV).\n" % nochar_at[0])
		elif len(nochar_at) > 1:
			sys.stderr.write("Warning (DiagDataPoint.get_all_char): Unknown band characters at energies %s meV.\n" % ", ".join(["%.3f" % e for e in sorted(nochar_at)]))

		return all_char
+
	def get_observable(self, obs, val = None):
		"""Get observable values

		Arguments:
		obs   Integer, string, list/array, or None. If integer, take the n-th
		      observable. If a string, take the observable with that obsid. If
		      a list or array, return values for each entry (recursively). If
		      None, take all observables.
		val   None or a value that self.get_index() can handle. If set, then
		      return the observable value(s) for that state. If None, return
		      values for all states.

		Returns:
		A float (if both obs and val select a single entry) or an array of
		floats (1- or 2-dimensional, as appropriate for the inputs). The value
		None may be returned on error, i.e., if obs is not a valid observable
		and/or if val does not refer to a valid state.
		"""
		if self.obsvals is None:
			return None  # skip empty DiagDataPoint
			# raise ValueError("Observables not available")
		# Normalize 'obs' to an integer row index (or None for all rows)
		if isinstance(obs, (int, np.integer)):
			if obs < 0 or obs >= len(self.obsvals):
				raise ValueError("Observable index out of range")
		elif isinstance(obs, str):
			if self._obsids is None:
				raise ValueError("Observable ids not available")
			if obs not in self._obsids:
				sys.stderr.write("Warning (DiagDataPoint.get_observable): Observable '%s' not available\n" % obs)
				return None
			obs = self._obsids.index(obs)
		elif isinstance(obs, (list, np.ndarray)):  # recursive call
			# Resolve val once up front so a missing state yields None rather
			# than an array of Nones
			if val is not None and self.get_index(val) is None:
				return None
			else:
				return np.array([self.get_observable(o, val) for o in obs])
		elif obs is None:
			pass
		else:
			raise TypeError("Invalid input for 'obs'")

		if obs is None:
			if val is None:
				return self.obsvals
			else:
				idx = self.get_index(val)
				return None if idx is None else self.obsvals[:, idx]
		else:
			if val is None:
				return self.obsvals[obs, :]
			else:
				idx = self.get_index(val)
				return None if idx is None else self.obsvals[obs, idx]
+
+	def set_observable_value(self, obs, bandval, obsval):
+		"""Set observable values to specific states.
+
+		Arguments:
+		obs      Integer or string. Observable index or id, respectively.
+		bandval  Float or integer number or a list or array. If numeric, look
+		         for state using self.getindex(). If a list or array, look for
+		         multiple states using self.getindex().
+		obsval   Float or array. The observable value(s). If an array, the shape
+		         must be set appropriately.
+		"""
+		if self.obsvals is None:
+			raise ValueError("Observables not available")
+		if isinstance(obs, int):
+			if obs < 0 or obs >= len(self.obsvals):
+				raise ValueError("Observable index out of range")
+		elif isinstance(obs, str):
+			if self._obsids is None:
+				raise ValueError("Observable ids not available")
+			if obs not in self._obsids:
+				self.add_observable(obsid = obs)
+				# sys.stderr.write("Warning (DiagDataPoint.get_observable): Observable '%s' not available\n" % obs)
+				# return None
+			obs = self._obsids.index(obs)
+		elif isinstance(obs, (list, np.ndarray)):  # recursive call
+			raise TypeError("Only single observable input allowed")
+		else:
+			raise TypeError("Invalid input for 'obs'")
+
+		if isinstance(bandval, (list, np.ndarray)) and isinstance(obsval, (list, np.ndarray)):
+			if len(bandval) != len(obsval):
+				raise ValueError("Band values (ids) and observable values must have same shape")
+			for bv, ov in zip(bandval, obsval):
+				idx = self.get_index(bv)
+				if idx is not None:
+					self.obsvals[obs, idx] = ov
+		elif isnumber(obsval):
+			if bandval is None:
+				self.obsvals[obs, :] = obsval
+			else:
+				idx = self.get_index(bandval)
+				if idx is not None:
+					self.obsvals[obs, idx] = obsval
+		else:
+			raise TypeError("Invalid input for 'bandval' and/or 'obsval'.")
+		return obsval
+
+	def subset(self, sel):
+		"""Take subset; can also be used for reordering
+
+		Argument:
+		sel   Integer or array. Anything that can be used as index to a numpy
+		      array.
+
+		Returns:
+		A new DiagDataPoint instance.
+		"""
+		if sel is None or len(sel) == 0:
+			# return empty instance if there is no selection (which happens if the original instance was already empty)
+			return DiagDataPoint(self.k, None, None)
+		newpt = DiagDataPoint(self.k, self.eival[sel], None if self.eivec is None else self.eivec[:, sel], paramval = self.paramval)
+		if self.obsvals is not None:
+			newpt.obsvals = self.obsvals[:, sel]
+			newpt._obsids = self._obsids
+		if self.bindex is not None:
+			newpt.bindex = np.asarray(self.bindex)[sel]
+		if self.llindex is not None:
+			newpt.llindex = np.asarray(self.llindex)[sel]
+		if self.char is not None:
+			newpt.char = np.asarray(self.char)[sel]
+		return newpt
+
+	def subset_inplace(self, sel):
+		"""Take subset and discard other states.
+
+		Argument:
+		sel   Integer or array. Anything that can be used as index to a numpy
+		      array.
+
+		Returns:
+		The present DiagDataPoint instance with only the selected states.
+		"""
+		self.tuple_index = None  # invalidate cached tuple indices
+		if sel is None or len(sel) == 0:
+			# return empty instance if there is no selection,
+			# which can happen if the original instance was already empty,
+			# however this is not necessarily the case (return self is not always enough)
+			self.eival = []
+			self.neig = 0
+			self.eivec = None
+			self._obsids = None
+			self.bindex = None
+			self.llindex = None
+			self.char = None
+			return self
+		self.eival = self.eival[sel]
+		self.neig = len(self.eival)
+		if self.eivec is not None:
+			self.eivec = self.eivec[:, sel]
+		if self.obsvals is not None:
+			self.obsvals = self.obsvals[:, sel]
+		if self.bindex is not None:
+			self.bindex = np.asarray(self.bindex)[sel]
+		if self.llindex is not None:
+			self.llindex = np.asarray(self.llindex)[sel]
+		if self.char is not None:
+			self.char = np.asarray(self.char)[sel]
+		return self
+
+	def select_llindex(self, ll):
+		"""Select states with a specific LL index.
+
+		Argument:
+		ll    Integer. The LL index.
+
+		Returns:
+		A new DiagDataPoint instance.
+		"""
+		if self.llindex is None:
+			raise ValueError("LL indices are not defined")
+		if isinstance(ll, tuple) and len(ll) == 2:
+			if ll[0] is not None and ll[1] is not None:
+				return self.subset((self.llindex >= ll[0]) & (self.llindex <= ll[1]))
+			elif ll[0] is not None:
+				return self.subset(self.llindex >= ll[0])
+			elif ll[1] is not None:
+				return self.subset(self.llindex <= ll[1])
+			else:
+				raise ValueError("Argument cannot be (None, None)")
+		else:
+			return self.subset(self.llindex == ll)
+
+	def select_bindex(self, b):
+		"""Select states with a specific band index.
+
+		Argument:
+		b     Integer. The band index.
+
+		Returns:
+		A new DiagDataPoint instance.
+		"""
+		if self.bindex is None:
+			raise ValueError("Band indices are not defined")
+		if isinstance(b, tuple) and len(b) == 2:
+			if b[0] is not None and b[1] is not None:
+				return self.subset((self.bindex >= b[0]) & (self.bindex <= b[1]))
+			elif b[0] is not None:
+				return self.subset(self.bindex >= b[0])
+			elif b[1] is not None:
+				return self.subset(self.bindex <= b[1])
+			else:
+				raise ValueError("Argument cannot be (None, None)")
+		else:
+			return self.subset(self.bindex == b)
+
+	def select_obs(self, obs, val, accuracy = None):
+		"""Select states by observable value.
+
+		Arguments:
+		obs       String. The observable id.
+		val       Number, 2-tuple, or list. If a number, match the value
+		          exactly or approximately. If a 2-tuple treat the two values
+		          (numeric or None) as lower and upper bound for a search
+		          interval. If a list, match any value in the list.
+		accuracy  None or positive float. Test equality with this accuracy. If
+		          None, match exactly. This only applies to testing equalities,
+		          i.e., if val is a number.
+
+		Returns:
+		A new DiagDataPoint instance.
+		"""
+		if self._obsids is None or self.obsvals is None:
+			raise ValueError("Observables not present")
+		if obs not in self._obsids:
+			raise IndexError("Observable %s not defined" % obs)
+		obsidx = self._obsids.index(obs)
+		if isnumber(val):
+			if accuracy is None:
+				sel = (self.obsvals[obsidx, :] == val)
+			else:
+				sel = (np.abs(self.obsvals[obsidx, :] - val) < accuracy)
+		elif isinstance(val, tuple) and len(val) == 2 and (isnumbernone(val[0]) and isnumbernone(val[1])):
+			if val[0] is not None and val[1] is not None:
+				sel = (self.obsvals[obsidx, :] >= val[0]) & (self.obsvals[obsidx, :] <= val[1])
+			elif val[0] is not None:
+				sel = (self.obsvals[obsidx, :] >= val[0])
+			elif val[1] is not None:
+				sel = (self.obsvals[obsidx, :] <= val[1])
+			else:
+				raise ValueError("Interval specification cannot be (None, None)")
+		elif isinstance(val, list):
+			sel = np.isin(self.obsvals[obsidx, :], val)
+		else:
+			raise TypeError("Argument val must be numeric, 2-tuple, or list")
+
+		return self.subset(sel)
+
+	def select_eival(self, val):
+		"""Select states by eigenvalue.
+
+		Arguments:
+		val   Number, 2-tuple, or list. If a number, match the value exactly. If
+		      a 2-tuple treat the two values (numeric or None) as lower and
+		      upper bound for a search interval. If a list, match any value in
+		      the list.
+
+		Returns:
+		A new DiagDataPoint instance.
+		"""
+		if isnumber(val):
+			sel = (self.eival == val)
+		elif isinstance(val, tuple) and len(val) == 2 and (isnumbernone(val[0]) and isnumbernone(val[1])):
+			if val[0] is not None and val[1] is not None:
+				sel = (self.eival >= val[0]) & (self.eival <= val[1])
+			elif val[0] is not None:
+				sel = (self.eival >= val[0])
+			elif val[1] is not None:
+				sel = (self.eival <= val[1])
+			else:
+				raise ValueError("Interval specification cannot be (None, None)")
+		elif isinstance(val, list):
+			sel = np.isin(self.eival, val)
+		else:
+			raise TypeError("Argument val must be numeric, 2-tuple, or list")
+		return self.subset(sel)
+
+	def select_char(self, which, inplace = False):
+		"""Select states by band character.
+
+		Arguments:
+		which  String or list. If a string, look for band characters that start
+		       with this string. If a list, match any string in the list.
+
+		Returns:
+		A new DiagDataPoint instance (inplace = False) or the present instance
+		(inplace = True).
+		"""
+
+		if self.char is None:
+			raise ValueError("Band characters not present")
+		if isinstance(which, str):
+			sel = np.array([c.startswith(which) for c in self.char], dtype = bool)
+		elif isinstance(which, list):
+			sel = np.array([False for c in self.char], dtype = bool)
+			for w in which:
+				sel |= np.array([c.startswith(w) for c in self.char], dtype = bool)
+		else:
+			raise TypeError("Argument 'which' should be a string or a list")
+		return self.subset_inplace(sel) if inplace else self.subset(sel)
+
+	def sort_by_eival(self, inplace = False, reverse = False):
+		"""Sort by eigenvalues.
+
+		Arguments:
+		inplace   True or False. Whether to return a new instance (False) or the
+		          present one (True).
+		reverse   True or False. Reverse or standard sorting direction.
+
+		Returns:
+		New or present DiagDataPoint instance.
+		"""
+		order = np.argsort(-self.eival) if reverse else np.argsort(self.eival)
+		return self.subset_inplace(order) if inplace else self.subset(order)
+
+	def sort_by_obs(self, obs, inplace = False):
+		"""Sort by eigenvalues.
+
+		Arguments:
+		obs       String. Observable id.
+		inplace   True or False. Whether to return a new instance (False) or the
+		          present one (True).
+
+		Returns:
+		New or present DiagDataPoint instance.
+		"""
+		if self._obsids is None or self.obsvals is None:
+			raise ValueError("Observables not present")
+		if obs not in self._obsids:
+			raise IndexError("Observable %s not defined" % obs)
+		obsidx = self._obsids.index(obs)
+		order = np.argsort(self.obsvals[obsidx, :])
+		return self.subset_inplace(order) if inplace else self.subset(order)
+
	def set_eivec_phase(self, accuracy = 1e-6, inplace = False):
		"""Multiply each eigenvector by a phase factor to fix the arbitrary phase.

		For each eigenvector, look for the largest absolute component |psi_i|
		and divide by the phase psi_i / |psi_i|. The result is that the
		resulting eigenvector will have Im(psi_i) = 0 and Re(psi_i) > 0. If
		there are multiple values psi_i of almost the same size, choose the
		largest i.

		Arguments:
		accuracy  Float. Fuzziness of determining which psi_i are considered
		          maximal. The value is relative to the maximum |psi_i|.
		inplace   True or False. Whether to return a new instance (False) or the
		          present one (True).

		Returns:
		New or present DiagDataPoint instance.

		Raises:
		ValueError if the eigenvectors are not available.
		"""
		if self.eivec is None:
			raise ValueError("For setting eigenvector phases, the eigenvectors are necessary, but not present.")

		new_eivec = np.zeros_like(self.eivec)
		for i in range(0, self.neig):
			vec = self.eivec[:, i]
			maxabs = np.max(np.abs(vec))
			# Components within a relative margin 'accuracy' of the maximum
			# count as maximal.
			threshold = (1.0 - accuracy) * maxabs
			allmax = (np.abs(vec) >= threshold)
			if np.count_nonzero(allmax) == 0:  # should never happen
				new_eivec[:, i] = 1. * vec
				continue
			# Among the (near-)maximal components, take the one at the largest index
			maxval = vec[allmax][-1]
			phase = maxval / np.abs(maxval)
			new_eivec[:, i] = vec / phase

		if inplace:
			self.eivec = new_eivec
			return self
		else:
			selall = np.full(self.neig, True, dtype=bool)
			newpt = self.subset(selall)  # 'abuse' self.subset() to create a copy
			newpt.eivec = new_eivec
			return newpt
+
+	def get_eivec_coeff(self, norbitals, accuracy = 1e-6, ll_full = False, ny = None):
+		"""Get complex coefficients for each orbital, for each eigenvector
+		The coefficients are extracted for each orbital as the eigenvector
+		component where the absolute value is maximal. If this happens at
+		multiple locations, then choose	the value at the largest index
+		(equivalent to largest z value).
+
+		Arguments:
+		norbitals   6 or 8. The number of orbitals.
+		accuracy    Float. The 'fuzziness' of determining which values are
+		            considered maximal. This is a relative number in terms of
+		            the maximal absolute value.
+		ll_full     True or False. If True, take a section of the eigenvector
+		            corresponding to the Landau level with the largest weight.
+		            If False (default), use the full eigenvector.
+		ny          None or integer. The size in the 'y direction'; for LL mode,
+		            this value serves as number of LLs in the basis. Required to
+		            be set if ll_full is True, otherwise it is ignored.
+
+		Returns:
+		coeff       Numpy array of shape (neig, norbitals) and type complex.
+		"""
+		if ll_full and ny is None:
+			raise ValueError("If argument ll_full is True, argument ny must be set.")
+		coeff = np.zeros((self.neig, norbitals), dtype=complex)
+		for i in range(0, self.neig):
+			vec = self.eivec[:, i]
+			# orbvec = vec.reshape((-1, norbitals))
+			# maxabs = np.amax(np.abs(orbvec), axis = 0)
+			if ll_full and ny is not None:  # For full LL mode, take section
+				vec0 = np.reshape(vec, (ny, -1))
+				absvec2 = np.abs(vec0)**2
+				ny_sect = np.argmax(np.sum(absvec2, axis = 1))
+				vec = vec0[ny_sect, :]
+			for j in range(0, norbitals):
+				orbvec = vec[j::norbitals]
+				maxabs = np.max(np.abs(orbvec))
+				threshold = (1.0 - accuracy) * maxabs
+				allmax = (np.abs(orbvec) >= threshold)
+				if np.count_nonzero(allmax) > 0:  # should always happen
+					coeff[i, j] = 1. * orbvec[allmax][-1]
+		return coeff
+
	def set_char(self, chardata, eival = None, llindex = None, eival_accuracy = 1e-6):
		"""Set band characters.

		Arguments:
		chardata   List or array of strings, or DiagDataPoint. The character
		           data. If a DiagDataPoint is given, extract all arguments
		           from it in a recursive call.
		eival      List/array or None. If None, set chardata to self.char as is.
		           If a list or array of numbers, then match these values to the
		           eigenvalues (self.eival).
		llindex    Integer or None. If set, match only states with this LL
		           index. Only works if eival is not None.
		eival_accuracy  Float. Maximal energy difference for an eigenvalue
		                match to count as a good match.

		Returns:
		The present DiagDataPoint instance.
		"""
		if isinstance(chardata, list) and eival is None:
			# Direct assignment: input must match the states one-to-one
			if len(chardata) != self.neig:
				raise ValueError("Input list has incorrect length")
			for c in chardata:
				if not isinstance(c, str):
					raise TypeError("Input list must contain strings only")
			self.char = chardata
			return self
		elif isinstance(chardata, (list, np.ndarray)):
			# Match by eigenvalue: each state gets the character belonging to
			# the closest input eigenvalue
			if not isinstance(eival, (list, np.ndarray)):
				raise TypeError("Eigenvalue data must be list or array")
			if not len(chardata) == len(eival):
				raise ValueError("Band character and eigenvalue input must be of equal length")
			if self.char is None:
				self.char = ["" for _ in self.eival]
			n_warnings_off = 0  # counts poor (distant) eigenvalue matches
			n_warnings_dup = 0  # counts duplicate matches

			for i1, e1 in enumerate(self.eival):
				# Restrict to the requested LL index, if applicable
				if llindex is not None and self.llindex is not None and self.llindex[i1] != llindex:
					continue
				i2 = np.argmin(np.abs(eival - e1))
				e2 = eival[i2]
				if abs(e1 - e2) > eival_accuracy:
					n_warnings_off += 1
				elif chardata[i2] in self.char:
					if llindex is not None:
						# For the LL mode, this happens regularly, because at B = 0
						# there are many duplicate energies (different LLs)
						self.char[i1] = chardata[i2]
					elif '?' not in chardata[i2]:
						# If the band character is unknown, do not count it as duplicate,
						# as this will issue misleading warning messages.
						n_warnings_dup += 1
				else:
					self.char[i1] = chardata[i2]

			if n_warnings_off > 0:
				sys.stderr.write("Warning (DiagDataPoint.set_char): Poor eigenvalue match for %i input values\n" % n_warnings_off)
			if n_warnings_dup > 0 and (llindex is None or llindex > 0):  # do not print this warning for the lower LLs, where this is normal
				sys.stderr.write("Warning (DiagDataPoint.set_char): Duplicate eigenvalue match for %i input values\n" % n_warnings_dup)
			return self

		elif isinstance(chardata, DiagDataPoint):
			return self.set_char(chardata.char, chardata.eival, llindex, eival_accuracy)  # recursive call
		else:
			raise TypeError
+
+	def set_bindex(self, bindexdata, eival = None, llindex = None, aligned_with_e0 = False):
+		"""Set band indices.
+
+		Arguments:
+		bindexdata  List or array of integers. The band indices.
+		eival       List/array or None. If None, set bindexdata to self.bindex
+		            as is. If a list or array of numbers, then match these
+		            values to the eigenvalues (self.eival).
+		llindex     Integer or None. If set, match only states with this LL
+		            index. Only works if eival is not None.
+		aligned_with_e0  True or False. Whether the band indices were aligned
+		                 with the zero energy. This should be set to True if the
+		                 band indices were set directly from e0, or is the band
+		                 indices are obtained from a BandAlignPoint with
+		                 aligned_with_e0 set to True.
+
+		Returns:
+		The present DiagDataPoint instance.
+		"""
+		if self.neig == 0:
+			return self
+		self.tuple_index = None  # invalidate cached tuple indices
+		self.aligned_with_e0 = aligned_with_e0
+		if isinstance(bindexdata, (list, np.ndarray)) and eival is None:
+			if len(bindexdata) != self.neig:
+				raise ValueError("Input list has incorrect length")
+			for bi in bindexdata:
+				if not isinstance(bi, (int, np.integer)):
+					raise TypeError("Input list must contain integers only")
+			self.bindex = bindexdata
+			return self
+		elif isinstance(bindexdata, (list, np.ndarray)):
+			if not isinstance(eival, (list, np.ndarray)):
+				raise TypeError("Eigenvalue data must be list or array")
+			if not len(bindexdata) == len(eival):
+				raise ValueError("Band-index and eigenvalue input must be of equal length")
+			if not isinstance(self.bindex, np.ndarray):  # initialize array only if not yet there
+				self.bindex = np.zeros(self.neig, dtype = int)
+			n_warnings = 0
+			for bi, e in zip(bindexdata, eival):
+				i = self.get_index(e) if llindex is None else self.get_index_with_llindex(e, llindex)
+				if i is None:
+					continue
+				if self.bindex[i] != 0 or abs(self.eival[i] - e) > 1e-6:
+					n_warnings += 1
+				self.bindex[i] = bi
+			if n_warnings > 0:
+				sys.stderr.write("Warning (DiagDataPoint.set_bindex): Poor or duplicate eigenvalue match for %i input values\n" % n_warnings)
+			return self
+		elif isinstance(bindexdata, DiagDataPoint):
+			return self.set_bindex(
+				bindexdata.bindex, eival = bindexdata.eival, llindex = llindex,
+				aligned_with_e0 = bindexdata.aligned_with_e0
+			)  # recursive call
+		elif bindexdata is None:
+			self.bindex = None
+			return self
+
+	def set_llindex(self, llindex):
+		"""Set band indices.
+
+		Arguments:
+		llindex  List or array of integers. The LL indices. These are set to
+		         self.llindex as is.
+
+		Returns:
+		The present DiagDataPoint instance.
+		"""
+		if not isinstance(llindex, (list, np.ndarray)):
+			raise TypeError("Input llindex must be array-like")
+		if len(llindex) != self.neig:
+			raise ValueError("Input llindex has incorrect length")
+		self.llindex = np.asarray(llindex)
+		self.tuple_index = None  # invalidate cached tuple indices
+		return self
+
+	def set_eivec(self, eivec, val = None):
+		"""Set eigenvectors.
+
+		Arguments:
+		eivec   Numpy array or DiagDataPoint instance. If an array, this is the
+		        eigenvector data that will be set to self.eivec. If a
+		        DiagDataPoint instance, copy the eigenvector data from there.
+		val     None or list/array of values that match using self.get_index().
+		        If set, the specified input data is applied to the matching
+		        states. If none, then the data is applied as is.
+
+		Returns:
+		The present DiagDataPoint instance.
+		"""
+
+		if isinstance(eivec, DiagDataPoint):  # recursive call
+			return self.set_eivec(eivec.eivec, eivec.eival)
+		if isinstance(eivec, np.ndarray):
+			if eivec.shape[0] == self.dim:
+				eivec1 = eivec
+			elif eivec.shape[1] == self.dim:
+				eivec1 = eivec.T
+			else:
+				raise ValueError("Arrbay eivec has size %s, but %s expected." % (eivec.shape, self.dim))
+		else:
+			raise TypeError("eivec must be an array")
+		if val is None:
+			if eivec1.shape[1] != self.neig:
+				raise ValueError("Array eivec has size %s, but %s expected." % (eivec.shape, self.dim))
+			self.eivec = eivec1
+		else:
+			if len(val) != eivec1.shape[1]:
+				raise ValueError("Array val has size %s, but %s expected." % (len(val), eivec1.shape[1]))
+			self.eivec = np.zeros((self.dim, self.neig), dtype = complex)
+			for v, ei in zip(val, eivec1.T):
+				idx = self.get_index(v)
+				if isinstance(v, float) and abs(v - self.eival[idx]) < 1e-6:
+					self.eivec[:, idx] = ei
+			zero_eivec = 0
+			for ei in self.eivec.T:
+				if np.all(ei == 0.0):
+					zero_eivec += 1
+			if zero_eivec > 0:
+				sys.stderr.write("Warning (DiagDataPoint.set_eivec): %i eigenvectors out of %i undefined.\n" % (zero_eivec, self.neig))
+		return self
+
+	def filter_transitions(self, ee, broadening=None, ampmin=100, inplace=False):
+		"""Filter transitions by energy
+
+		See DiagData.filter_transitions() and TransitionsData.at_energy() for
+		more information.
+		"""
+		if self.transitions is None:
+			return self
+		filtered_transitions = self.transitions.at_energy(ee, broadening=broadening, index=self.grid_index, ampmin=ampmin)
+
+		if inplace:
+			self.transitions = filtered_transitions
+			return self
+		else:
+			new_ddp = copy.copy(self)
+			new_ddp.transitions = filtered_transitions
+			return new_ddp
+
+	def to_binary_file(self, filename):
+		"""Save data to a binary file (Numpy npz or HDF5) file.
+		This function saves all fields (member variables) specified in global
+		variable binfile_ddp_fields as well as the x values (momentum and/or
+		parameter value).
+
+		For Numpy format: The file is a compressed npz file with a collection of
+		numpy arrays. For more information on the file format, consult:
+		https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
+
+		For HDF5 format: The file is a HDF5 container and the data is saved in a
+		separate group for each DiagDataPoint. The values for k and b are stored
+		as attributes. We do not use compression because it would reduce the
+		file size only minimally. See also:	https://docs.h5py.org
+
+		Argument:
+		filename   String. The file name. The output type is extracted from the
+		           file name extension.
+
+		No return value
+		"""
+		## Do a check of fields on first data point
+		for field in binfile_ddp_fields:
+			if field not in dir(self):
+				raise AttributeError("Field %s is not a valid member of DiagDataPoint class." % field)
+
+		## Gather data from DiagDataPoint instances
+		ddp_data = {}
+		for field in binfile_ddp_fields:
+			if isinstance(getattr(self, field), np.ndarray):  # also excludes None
+				ddp_data[field] = getattr(self, field)
+
+		## Gather data from k and paramval
+		x_data = {}
+		if isinstance(self.k, Vector):
+			comp = self.k.components(prefix = 'k')
+			for co in comp:
+				x_data[co] = self.k.component(co, prefix = 'k')
+		elif isinstance(self.k, (tuple, list, float, complex, np.floating, np.complex_)):
+			x_data['k'] = self.k
+		else:
+			sys.stderr.write("Warning (DiagDataPoint.to_binary_file): Data type of k value is invalid.\n")
+		if self.paramval is None:
+			pass  # This should pass silently
+		elif isinstance(self.paramval, Vector):
+			comp = self.paramval.components(prefix = 'b')
+			for co in comp:
+				x_data[co] = self.paramval.component(co, prefix = 'b')
+		elif isinstance(self.paramval, (tuple, list, float, complex, np.floating, np.complex_)):
+			x_data['b'] = self.paramval
+		else:
+			sys.stderr.write("Warning (DiagDataPoint.to_binary_file): Data type of parameter value is invalid.\n")
+
+		ext = filename.split('.')[-1]
+		if ext == 'npz':
+			try:
+				np.savez_compressed(filename, **x_data, **ddp_data)
+			except:
+				sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Failed to write to Numpy binary file '%s'\n" % filename)
+		elif ext in ['h5', 'hdf5']:
+			groupname = 'ddp_' + self.file_id() + '_' + self.hash_id()
+			try:
+				hdf5o.append_retry(filename, groupname, data = ddp_data, attr = x_data)
+			except:
+				sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Failed to write to HDF5 binary file '%s'\n" % filename)
+				raise
+		else:
+			sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Unknown file type/extension '%s'\n" % ext)
+		return
+
	## Compatibility / legacy functions; remove later
	def __getitem__(self, i):
		"""Not implemented. Kept as an explicit stub so legacy index access fails loudly."""
		raise NotImplementedError
+
	def __len__(self):
		"""Not implemented. Kept as an explicit stub so legacy len() use fails loudly."""
		raise NotImplementedError
+
+### DIAGDATA ###
+class DiagData(types.DiagData):
+	"""Container for DiagDataPoint instances
+
+	Attributes:
+	data     List of DiagDataPoint instances
+	shape    Shape of the data point array (of momentum or magnetic field
+	         values)
+	grid     A VectorGrid instance or None.
+	gridvar	 String. The grid variable.
+	"""
	def __init__(self, data, shape = None, grid = None, gridvar = None):
		# Normalize 'data' to a list of DiagDataPoint instances
		if isinstance(data, DiagDataPoint):
			self.data = [data, ]
		elif isinstance(data, list):
			for d in data:
				if not isinstance(d, DiagDataPoint):
					raise TypeError("List elements should be DiagDataPoint instances.")
			self.data = data
		elif data is None:
			self.data = []
		else:
			raise TypeError("Input should be a DiagDataPoint or a list thereof.")
		self.shape = ()  # Initialized by self.set_shape()
		self.strides = ()  # Initialized by self.set_shape()
		# A VectorGrid carries its own shape and variable prefix, hence the
		# arguments shape and gridvar are mutually exclusive with grid.
		if isinstance(grid, VectorGrid):
			if shape is not None:
				raise ValueError("Argument shape cannot be used together with grid")
			if gridvar is not None:
				raise ValueError("Argument gridvar cannot be used together with grid")
			self.set_shape(grid.shape)
			self.grid = grid
			self.gridvar = grid.prefix
		elif grid is None:
			self.set_shape(shape)
			self.grid = None
			self.gridvar = '' if gridvar is None else gridvar
		else:
			raise TypeError("Argument grid must be a VectorGrid instance or None")
		self.bindex_cache = None  # presumably a cache for band-index lookups; starts unset
+
	def align_with_grid(self):
		"""Rearrange the data points in case the order does not match that of self.grid (VectorGrid instance).

		This can be the case, for example, if the data was constructed from
		multiple lower-dimensional grids.

		No return value. Raises ValueError if the grid is absent or incompatible.
		"""
		if self.grid is None:
			raise ValueError("Data does not contain a VectorGrid member")
		if len(self.data) != len(self.grid):
			raise ValueError("Data and grid shapes are incompatible")
		# x values of the data points (momenta or parameter values) and of the grid
		kvald = [d.k for d in self.data] if self.gridvar == 'k' else [d.paramval for d in self.data]
		kvalg = [k for k in self.grid]

		# First test whether data and grid are already aligned
		equal_arrays = True
		for kd, kg in zip(kvald, kvalg):
			if kd != kg:
				equal_arrays = False
				break
		if equal_arrays:
			return  # Nothing to be done

		# Try with transposition (TODO: Three dimensions)
		if len(self.grid.shape) == 2:
			# NOTE(review): the flat indices are computed against the
			# transposed shape (self.shape[1], self.shape[0]); confirm that
			# this matches the row/column convention of VectorGrid.
			multi_indices = np.array([[i0, i1] for i1 in range(0, self.grid.shape[0]) for i0 in range(0, self.grid.shape[1])])
			indices = np.ravel_multi_index(multi_indices.T, (self.shape[1], self.shape[0]))
			equal_arrays = True
			for j1, j2 in enumerate(indices):
				k2 = self.data[j2].k if self.gridvar == 'k' else self.data[j2].paramval
				if k2 != self.grid[j1]:
					equal_arrays = False
					break
			if equal_arrays:
				newdata = [self.data[j] for j in indices]
				self.data = newdata
				self.set_shape(self.grid.shape)
				return
		elif len(self.grid.shape) == 3:
			raise NotImplementedError("Only implemented for two dimensions at this moment")

		# Fallback: look up each grid point among the data points individually
		if self.gridvar == 'k':
			newdata = [self.find(k) for k in self.grid]
		else:
			newdata = [self.find(0.0, p) for p in self.grid]
		if sum([1 if d is None else 0 for d in newdata]):
			raise ValueError("Data points and grid points have different momenta")
		self.data = newdata
		self.set_shape(self.grid.shape)
		return
+
+	def sort_by_grid(self):
+		"""Sort data by grid (only if grid is a valid VectorGrid instance)."""
+		if self.grid is None:
+			sys.stderr.write("Warning (DiagData.sort_by_grid): Data cannot be sorted in absence of VectorGrid instance.\n")
+			return
+		if self.grid.is_sorted():
+			return
+		newgrid, indices = self.grid.sort(in_place = True, flat_indices = True)
+		newdata = [self.data[i] for i in indices]
+		self.data = newdata
+
+	def get_momenta(self):
+		"""Get list of momenta"""
+		return [d.k for d in self.data]
+
	def get_momentum_grid(self):
		"""Get grid values of the momentum grid.

		Returns:
		If self.grid is a VectorGrid instance containing momentum, return that.
		Otherwise, return a 1-, 2-, or 3-tuple with each element being a list
		of the components of the momenta.
		"""
		if self.grid is not None and self.gridvar == 'k':
			return self.grid
		elif len(self.shape) == 1:
			return ([d.k for d in self.data],)
		elif len(self.shape) == 2:
			# NOTE(review): the slicing assumes the points are stored with the
			# last shape axis varying fastest — confirm against set_shape().
			return ([d.k[0] for d in self.data[:self.shape[1]]], [d.k[1] for d in self.data[::self.shape[1]]])
		elif len(self.shape) == 3:
			stride0 = self.shape[1] * self.shape[2]
			stride1 = self.shape[2]
			# Components are extracted from the fastest to the slowest axis
			return ([d.k[2] for d in self.data[:self.shape[2]]], [d.k[1] for d in self.data[:(self.shape[1] * stride1):stride1]], [d.k[0] for d in self.data[::stride0]])
		else:
			raise ValueError("Invalid dimension")
+
	def get_paramval(self, component = None):
		"""Get a list of parameter values (magnetic field).

		Argument:
		component    None or string. If None, return VectorGrid instance or list
		             of Vector instances if applicable. If a string, extract
		             that vector component for all Vector instances in the list
		             or grid; floats are left alone. The return value is then
		             always a list of numerical values.

		Returns:
		If self.grid is a VectorGrid instance not containing momentum, return
		that. Otherwise, return a list of parameter values if they are set, or
		None if not.
		"""
		if self.grid is not None and self.gridvar != 'k':
			values = self.grid
		elif len(self.data) == 0:
			return []
		elif self.gridvar != 'k':
			values = [d.paramval for d in self.data]
		elif self.data[0].paramval is None:
			# Momentum grid without parameter values: nothing to return
			return None
		else:
			values = [d.paramval for d in self.data]
		if component is None:
			return values
		if all([isinstance(val, (float, np.floating, int, np.integer)) for val in values]):
			# Purely numeric values: no component extraction needed
			return values
		# Pass the grid prefix (e.g., 'b') to Vector.component() when the
		# requested component name starts with it
		pf = self.gridvar if isinstance(self.gridvar, str) and component.startswith(self.gridvar) else ''
		return [val.component(component, prefix = pf) if isinstance(val, Vector) else val for val in values]
+
+	def get_xval(self, index = None):
+		"""Get 'x-values', that can serve as natural coordinates of the horizontal axis of a plot.
+
+		Argument:
+		index   Integer, tuple, or None. If an integer or tuple, return that
+		        element from the grid or list of k or B values. If None, return
+		        the whole grid or list.
+		"""
+		if self.grid is not None and index is not None:
+			return self.grid[index]
+		elif self.gridvar == 'k' or self.gridvar == '':
+			xval = self.get_momenta()
+		else:
+			xval = self.get_paramval()
+		return xval if index is None else xval[index]
+
+	def get_degrees(self, default = None):
+		"""Is the unit of angular quantities degrees?
+
+		Argument:
+		default   Value to return if the unit cannot be determined otherwise.
+
+		Returns:
+		True or False (meaning degrees or radians, respectively) or the value
+		set by argument default.
+		"""
+		if self.grid is not None:
+			return self.grid.degrees
+		elif len(self) == 0:
+			return default
+		elif isinstance(self.data[0].paramval, Vector):
+			return self.data[0].paramval.degrees
+		elif isinstance(self.data[0].k, Vector):
+			return self.data[0].k.degrees
+		else:
+			return default
+
	def get_zero_point(self, return_index = False, ignore_paramval = False):
		"""Get the point at zero momentum and/or magnetic field.

		Argument:
		return_index     True or False. If True, return the DiagDataPoint
		                 instance and its index in the list (self.data). If
		                 False, return the DiagDataPoint instance only.
		ignore_paramval  True or False. If True, return the data point at zero
		                 momentum without caring about the value of the magnetic
		                 field; this applies only if the grid variable is
		                 momentum. If False, only return the point where
		                 momentum and field are both zero.

		Returns:
		ddp       DiagDataPoint instance
		index     Integer (optional)

		Note:
		If there is no zero point, return None or None, None.
		"""
		global zero_point_warning_issued  # module-level flag: warn at most once
		izero = []
		for i, d in enumerate(self.data):
			# A paramval of None counts as zero; numeric values are compared
			# with a tolerance of 1e-10
			if d.k == 0.0 and (d.paramval is None or abs(d.paramval) < 1e-10):
				izero.append(i)
			elif d.k == 0.0 and ignore_paramval and self.gridvar == 'k':
				izero.append(i)
		if len(izero) == 0:
			if return_index:
				return None, None
			else:
				return None
		else:
			if len(izero) > 1 and not zero_point_warning_issued:
				sys.stderr.write("Warning (get_zero_point): More than one 'zero point' found.\n")
				zero_point_warning_issued = True
			# With multiple matches, the first one is returned
			if return_index:
				return self.data[izero[0]], izero[0]
			else:
				return self.data[izero[0]]
+
	def get_base_point(self, return_index = False):
		"""Get base point, where the momentum and magnetic-field components are minimal.

		In contrast to get_zero_point(), this function always returns a value.
		The function minimizes the absolute values of the momentum and magnetic-
		field components.

		Argument:
		return_index     True or False. If True, return the DiagDataPoint
		                 instance and its index in the list (self.data). If
		                 False, return the DiagDataPoint instance only.

		Returns:
		ddp       DiagDataPoint instance
		index     Integer (optional)
		"""
		if len(self.data) == 1:
			idx = 0
		elif isinstance(self.grid, VectorGrid):
			# With a grid: take the minimal |value| along each grid axis and
			# convert the resulting multi-index to a flat index via strides
			grid_arr = self.grid.get_array()
			tuple_idx = tuple(np.argmin(np.abs(val)) for val in grid_arr)
			idx = np.sum([ii * ss for ii, ss in zip(tuple_idx, self.strides)])
		else:
			# Without a grid: scan all data points for minimal |k| and |paramval|
			def tt(x):  # convert value to a lexicographically comparable tuple
				return tuple(abs(xi) for xi in x.value) if isinstance(x, Vector) else (abs(x),) if isinstance(x, (float, np.floating)) else (0.0,) if x is None else (np.inf,)
			kmin = tt(self.data[0].k)
			bmin = tt(self.data[0].paramval)
			idx = 0
			for i, d in enumerate(self.data):
				# NOTE(review): the 'or' updates both minima when either one
				# decreases; confirm this tie-breaking is intended.
				if tt(d.k) < kmin or tt(d.paramval) < bmin:
					kmin = tt(d.k)
					bmin = tt(d.paramval)
					idx = i
		return (self.data[idx], idx) if return_index else self.data[idx]
+
+	def get_total_neig(self):
+		"""Get the total number of states summed over all points in self.data"""
+		return sum([d.neig for d in self.data])
+
+	def select_llindex(self, llval):
+		"""Select all states with a specific LL index.
+
+		Argument:
+		llval  Integer. The LL index. See DiagDataPoint.select_llindex().
+
+		Returns:
+		A new DiagData instance.
+		"""
+		if len(self.data) == 0:
+			return None
+		elif self.data[0].llindex is None:
+			return None
+		if self.grid is not None:
+			return DiagData([d.select_llindex(llval) for d in self.data], grid = self.grid)
+		else:
+			return DiagData([d.select_llindex(llval) for d in self.data], shape = self.shape, gridvar = self.gridvar)
+
+	def select_eival(self, val):
+		"""Select all states matching (a) specific (range of) eigenvalue(s).
+
+		Argument:
+		val    Number, list, or 2-tuple. Matching values. See
+		       DiagDataPoint.select_eival().
+
+		Returns:
+		A new DiagData instance.
+		"""
+		if len(self.data) == 0:
+			return None
+		if self.grid is not None:
+			return DiagData([d.select_eival(val) for d in self.data], grid = self.grid)
+		else:
+			return DiagData([d.select_eival(val) for d in self.data], shape = self.shape, gridvar = self.gridvar)
+
+	def set_char(self, chardata, eival = None, llindex = None, eival_accuracy = 1e-6):
+		"""Set band character values at zero point
+
+		Argument:
+		chardata  List of strings. The band characters. If DiagData(Point)
+				  is given, extract all arguments automatically.
+		eival     List/array or None. If set, set band characters for these
+		          eigenvalues.
+		llindex   Integer or None. If set, constrain to states with this LL
+		          index.
+
+		Note:
+		See DiagDataPoint.set_char() for more details on arguments.
+
+		Returns:
+		The DiagData point at 'zero' or None if it does not exist.
+		"""
+
+		data_k0 = self.get_zero_point()
+		if data_k0 is None:
+			sys.stderr.write("Warning (DiagData.set_char): Cannot find 'zero point', so cannot add band characters\n")
+			return None
+		if isinstance(chardata, DiagData):
+			chardata1 = chardata.get_zero_point()
+		else:
+			chardata1 = chardata
+		return data_k0.set_char(chardata1, eival, llindex, eival_accuracy)
+
+	def get_all_char(self):
+		"""Get band characters at zero point.
+
+		Returns:
+		A dict instance of the form {'char': eigenvalue, ...}. See
+		DiagDataPoint.get_all_char() for details.
+		"""
+		data_k0 = self.get_zero_point()
+		if data_k0 is None or data_k0.char is None:
+			return None
+		else:
+			return data_k0.get_all_char()
+
+	def get_all_llindex(self):
+		"""Get a sorted list of all present LL indices.
+
+		Returns:
+		A list of integers, or None if LL indices are not defined.
+		"""
+		if len(self.data) == 0:
+			return None
+		if all(d.llindex is None for d in self.data):
+			return None
+		llidx = set()
+		for d in self.data:
+			if d.llindex is not None:
+				llidx = llidx | set(list(d.llindex))
+		return sorted(list(llidx))
+
+	def reset_bindex(self):
+		"""Reset band indices.
+		This function should be called when applying band indices to the points
+		in self.data."""
+		self.bindex_cache = None  # invalidate cached list of band indices
+		for d in self.data:
+			d.set_bindex(None)
+
	@property
	def aligned_with_e0(self):
		"""Boolean property that tells whether all data points are aligned with E0"""
		# True only if every DiagDataPoint reports aligned_with_e0;
		# vacuously True for an empty data list.
		return all(d.aligned_with_e0 for d in self.data)
+
+	def get_e_neutral(self, flat=False):
+		"""Get neutral energies from data (for use in density functions)"""
+		shape = len(self.data) if flat else self.shape
+		try:
+			result = np.array([d.get_eival0() for d in self.data], dtype=float)
+		except ValueError:
+			return None
+		if np.count_nonzero(np.isnan(result)) > 0:
+			return None
+		return result.reshape(shape)
+
+	def get_all_bindex(self):
+		"""Get a sorted list of all possible band indices."""
+		if len(self.data) == 0:
+			return None
+		if self.data[0].bindex is None:
+			return None
+		if self.bindex_cache is not None:
+			return self.bindex_cache
+		bidx = set([])
+		if self.data[0].llindex is None:
+			for d in self.data:
+				if d.bindex is None:
+					continue
+					# return None
+				bidx = bidx | set(list(d.bindex))
+		else:
+			for d in self.data:
+				if d.llindex is None or d.bindex is None:
+					continue
+					# return None
+				bidx = bidx | set(zip(d.llindex, d.bindex))
+		self.bindex_cache = sorted(list(bidx))
+		return self.bindex_cache
+
+	def check_bindex(self):
+		"""Check whether the set of all band indices spans all states
+
+		Returns:
+		True or False
+		"""
+		try:
+			eivals = self.get_eival_by_bindex()
+		except:
+			return False
+		total_neig_b = sum([len(eivals[b]) - np.isnan(eivals[b]).sum() for b in eivals])
+		return total_neig_b == self.get_total_neig()
+
+	def get_eival_by_bindex(self, b = None):
+		"""Get eigenvalues by band index
+
+		Argument:
+		b   If None, get eigenvalues for all band indices. If an integer or a
+		    1-tuple, get eigenvalues for this band index. If a 2-tuple, get
+		    eigenvalues with this LL index and band index.
+
+		Returns:
+		Array of eigenvalues.
+		"""
+		if len(self.data) == 0:
+			return None
+		if self.data[0].bindex is None:
+			raise ValueError("No band index data")
+		bidx = self.get_all_bindex()
+		if b is None:
+			eivals = {}
+			for d in self.data:
+				d.build_tuple_index_cache()
+			for b in bidx:
+				bi = (b,) if isinstance(b, (int, np.integer)) else b  # make a tuple
+				eival = [d.get_eival(bi) for d in self.data]
+				eivals[b] = np.array([float("nan") if e is None else e for e in eival], dtype = float)
+			return eivals
+		elif isinstance(b, (int, np.integer)):
+			eival = [d.get_eival((b,)) for d in self.data]
+			eival = np.array([float("nan") if e is None else e for e in eival], dtype = float)
+		elif isinstance(b, tuple) and len(b) in [1, 2]:
+			eival = [d.get_eival(b) for d in self.data]
+			eival = np.array([float("nan") if e is None else e for e in eival], dtype = float)
+		else:
+			raise TypeError("Input should be integer, tuple (1- or 2-), or None")
+		return eival
+
+	def get_observable_by_bindex(self, obs = None, b = None):
+		"""Get observable by band index
+
+		Argument:
+		obs  String. Observable id.
+		b    If None, get eigenvalues for all band indices. If an integer or a
+		     1-tuple, get eigenvalues for this band index. If a 2-tuple, get
+		     eigenvalues with this LL index and band index.
+
+		Returns:
+		Array of observable values
+		"""
+		if len(self.data) == 0:
+			return None
+		if self.data[0].bindex is None:
+			raise ValueError("No band index data")
+		if self.data[0].obsvals is None:
+			raise ValueError("No observables data")
+		bidx = self.get_all_bindex()
+		if b is None:
+			obsvals = {}
+			for d in self.data:
+				d.build_tuple_index_cache()
+			for b in bidx:
+				bi = (b,) if isinstance(b, (int, np.integer)) else b  # make a tuple
+				obsvals[b] = array_none_to_nan([d.get_observable(obs, bi) for d in self.data])
+			return obsvals
+		elif isinstance(b, (int, np.integer)):
+			obsvals = array_none_to_nan([d.get_observable(obs, (b,)) for d in self.data])
+		elif isinstance(b, tuple) and len(b) in [1, 2]:
+			obsvals = array_none_to_nan([d.get_observable(obs, b) for d in self.data])
+		else:
+			raise TypeError("Input should be integer, tuple (1- or 2-), or None")
+		return None if obsvals is None else obsvals.T
+
+	def find(self, kval, paramval = None, return_index = False, strictmatch = False):
+		"""Find a data point.
+
+		Arguments:
+		kval          Float or Vector instance. The momentum to search for.
+		paramval      None, float or Vector instance. If set, the parameter
+		              value (magnetic field value) to search for.
+		return_index  True or False. If True, return DiagDataPoint instance and
+		              its position in self.data. If False, return DiagDataPoint
+		              instance only.
+		strictmatch   True of False. If True, then test vector values on
+		              identity (same value and same representation). If False,
+		              test vector values on equality (same value, but not
+		              necessarily same representation).
+
+		Returns:
+		ddp   DiagDataPoint instance.
+		j     Integer. The index of ddp in self.data.
+
+		Note:
+		If there is no match, return None or None, None.
+		"""
+		all_k = self.get_momenta()
+		if paramval is not None:
+			all_p = self.get_paramval()
+			if all_p is None:
+				raise ValueError("Argument paramval requested but not available")
+			for j, k, p in zip(list(range(0, len(self.data))), all_k, all_p):
+				if ((strictmatch and isinstance(k, Vector) and isinstance(kval, Vector) and kval.identical(k)) or (not strictmatch and k == kval)) and abs(paramval - p) < 1e-6:
+					if return_index:
+						return self.data[j], j
+					else:
+						return self.data[j]
+		else:
+			for j, k in enumerate(all_k):
+				if (strictmatch and isinstance(k, Vector) and isinstance(kval, Vector) and kval.identical(k)) or (not strictmatch and k == kval):
+					if return_index:
+						return self.data[j], j
+					else:
+						return self.data[j]
+		if return_index:
+			return None, None
+		else:
+			return None
+
+	def get_data_labels(self, by_index = None):
+		"""Get data labels and plot mode.
+		This function is used to get data sets for plots.
+
+		Arguments:
+		by_index   If True, the returned labels are the band indices. Otherwise,
+		           the momenta or parameter (magnetic field) values.
+
+		Returns:
+		labels    List of band labels.
+		plotmode  One of "index", "momentum", "paramval".
+		"""
+		if len(self.data) == 0:
+			return None
+		elif by_index:
+			b_idx = self.get_all_bindex()
+			if b_idx is not None:
+				return b_idx, "index"
+			# fallthrough:
+		if self.gridvar != 'k':  # and #(self.get_paramval() is not None):
+			return list(zip(self.get_momenta(), self.get_paramval())), "paramval"
+		else:
+			return self.get_momenta(), "momentum"
+
+	def get_plot_coord(self, label, mode):
+		"""Get plot coordinates.
+		This function is used to get coordinates (x, E(x)) for standard plots or
+		coordinates ((x, y), E(x, y)) for contour plots.
+
+		Arguments:
+		label   Band label: Integer, 2-tuple, or Vector instance.
+		mode    Data mode. One of: "index", "index2d", "index3d", "paramval"
+		        or "param", "momentum" or "k".
+
+		Returns:
+		kval    Array with coordinates x or (x, y)
+		eival   Array with coordinates E(x) or E(x, y)
+		These arrays have identical shapes.
+		"""
+		if mode == "index":
+			return self.get_momenta() if self.gridvar == 'k' else self.get_paramval(), self.get_eival_by_bindex(label)
+		elif mode == "index2d":
+			if len(self.shape) != 2:
+				raise ValueError("Not a 2D grid")
+			kval_flat = self.get_momenta()
+			kval = [[kval_flat[jx * self.strides[0] + jy * self.strides[1]] for jy in range(0, self.shape[1])] for jx in range(0, self.shape[0])]
+			eival = np.reshape(self.get_eival_by_bindex(label), self.shape)
+			return kval, eival
+		elif mode == "index3d":
+			if len(self.shape) != 3:
+				raise ValueError("Not a 3D grid")
+			kval_flat = self.get_momenta()
+			kval = [[[kval_flat[jx * self.strides[0] + jy * self.strides[1] + jz * self.strides[2]] for jz in range(0, self.shape[2])] for jy in range(0, self.shape[1])] for jx in range(0, self.shape[0])]
+			eival = np.reshape(self.get_eival_by_bindex(label), self.shape)
+			return kval, eival
+		elif mode == "paramval" or mode == "param":
+			if not (isinstance(label, tuple) and len(label) == 2):
+				raise TypeError("Argument label must be 2-tuple")
+			ddp = self.find(label[0], label[1])
+			return label[1], None if ddp is None else ddp.eival
+		elif mode == "momentum" or mode == "k":
+			ddp = self.find(label)
+			return label, None if ddp is None else ddp.eival
+		else:
+			raise ValueError("Invalid value for argument mode")
+
+	def get_observable(self, obs, label, mode):
+		"""Get observable values belonging to output from self.get_plot_coord.
+
+		Arguments:
+		obs     String with observable id or a list of them.
+		label   Band label: Integer, 2-tuple, or Vector instance.
+		mode    Data mode. One of: "index", "index2d", "paramval" or "param",
+		        "momentum" or "k".
+
+		Returns:
+		obsval  Array with values of the observable(s)
+		"""
+		if mode == "index":
+			return self.get_observable_by_bindex(obs, label)
+		elif mode == "index2d":
+			if not len(self.shape) == 2:
+				raise ValueError("Grid shape is not 2D")
+			obsval = self.get_observable_by_bindex(obs, label)
+			target_shape = obsval.shape[:-1] + self.shape
+			return np.reshape(self.get_observable_by_bindex(obs, label), target_shape)
+		elif mode == "paramval" or mode == "param":
+			if not (isinstance(label, tuple) and len(label) == 2):
+				raise TypeError("Argument label must be 2-tuple")
+			ddp = self.find(label[0], label[1])
+			return None if ddp is None else ddp.get_observable(obs)
+		elif mode == "momentum" or mode == "k":
+			ddp = self.find(label)
+			return None if ddp is None else ddp.get_observable(obs)
+		else:
+			raise ValueError("Invalid value for argument mode")
+
+	def set_observable_values(self, obsid, obsval, label):
+		"""Set observable values.
+
+		Arguments:
+		obsid   String with observable id.
+		obsval  Array. Observable values to set.
+		label   Band label: Integer, 2-tuple, or Vector instance; or a list of
+		        band labels.
+
+		No return value
+		"""
+		if len(obsval) != len(self.data):
+			raise ValueError("Invalid shape for 'obsval'")
+
+		if isinstance(label, (list, np.ndarray)):
+			if len(label) != len(self.data):
+				raise ValueError("Invalid shape for 'label'")
+			for d, o, lb in zip(self.data, obsval, label):
+				d.set_observable_value(obs = obsid, obsval = o, bandval = lb)
+		else:
+			for d, o in zip(self.data, obsval):
+				d.set_observable_value(obs = obsid, obsval = o, bandval = label)
+		return
+
	def get_values_dict(self, quantities, sort=True, flat=True):
		"""Extract quantities from this instance and put them in a dict.

		Arguments:
		quantities  List of strings. The quantities to extract. Possible choices
		            are 'E', 'llindex', 'bindex', 'char', the vector components
		            of 'k' and 'b' (including 'k' and 'b' themselves) and any
		            observable.
		sort        True or False. If True, sort the DiagDataPoints by (energy)
		            eigenvalue.
		flat        True or False. If True, each dict value is a one-dimensional
		            array with the corresponding values of all data points. If
		            False, each dict value is a list of arrays where each array
		            represents a DiagDataPoint.

		Returns:
		result      A dict instance, where the keys are the valid quantities in
		            the input argument quantities and the values the
		            corresponding values, in the form of a 1-dimensional array
		            (if flat = True) or a list of 1-dimensional arrays (if
		            flat = False). Unrecognized quantities are skipped silently;
		            k/b components that cannot be determined are reported on
		            stderr and omitted from the result.
		"""
		if len(self.data) == 0:
			return {}
		result = {}
		# Observable ids as defined on the first data point; empty if undefined
		obsids = self.data[0].obsids
		obsids = obsids if obsids is not None else []
		data = [d.sort_by_eival() for d in self.data] if sort else self.data
		missing_quantities = []
		for q in quantities:
			if q == 'E':
				result[q] = [d.eival for d in data]
			elif q == 'bindex':
				# -99 acts as a placeholder where band indices are missing
				result[q] = [d.bindex if d.bindex is not None else np.full(d.neig, -99) for d in data]
			elif q == 'llindex':
				# LL index 0 acts as a placeholder where LL indices are missing
				result[q] = [d.llindex if d.llindex is not None else np.full(d.neig, 0,) for d in data]
			elif q == 'char':
				result[q] =  [d.char if d.char is not None else np.full(d.neig, '') for d in data]
			elif q in obsids:
				result[q] = [np.real(d.get_observable(q)) for d in data]
			elif q in ['k', 'kx', 'ky', 'kz', 'kphi', 'ktheta']:
				# Momentum components: prefer values from the grid; otherwise
				# extract them from the Vector stored at each data point
				if self.grid is not None and self.gridvar == 'k':
					kvalues = self.grid.get_values(q)
				elif all(isinstance(d.k, Vector) for d in data):
					kvalues = [d.k.component(q, prefix='k') for d in data]
				else:
					missing_quantities.append(q)
					continue
				# Broadcast the single per-point value over all d.neig states
				result[q] = [np.full(d.neig, k) for d, k in zip(data, kvalues)]
			elif q in ['b', 'bx', 'by', 'bz', 'bphi', 'btheta']:
				# Magnetic-field components, analogous to momentum above
				if self.grid is not None and self.gridvar == 'b':
					kvalues = self.grid.get_values(q)
				elif all(isinstance(d.paramval, Vector) for d in data):
					kvalues = [d.paramval.component(q, prefix='b') for d in data]
				else:
					missing_quantities.append(q)
					continue
				result[q] = [np.full(d.neig, k) for d, k in zip(data, kvalues)]
		if len(missing_quantities) == 1:
			sys.stderr.write(f"ERROR (DiagData.get_values_dict): The quantity {missing_quantities[0]} is not defined.\n")
		if len(missing_quantities) > 1:
			qstr = ", ".join(missing_quantities)
			sys.stderr.write(f"ERROR (DiagData.get_values_dict): The quantities {qstr} are not defined.\n")
		if flat:
			return {q: np.hstack(v) for q, v in result.items()}
		else:
			return result
+
	def filter_transitions(self, energies, broadening=None, ampmin=100, inplace=False):
		"""Determine the transition amplitudes for a system filled to a certain energy.

		Arguments:
		energies     Energies at which to calculate the transitions. This can be
		             a number (constant energy), or a one-dimensional list or
		             array. If it is a list or array, its length must be
		             commensurate with the length of the present DiagData
		             instance.
		broadening   BroadeningFunction or None. If set, use the occupation
		             function from broadening. If None, assume a step function.
		ampmin       Float. Threshold value of the transition amplitude;
		             transitions whose amplitude is lower are discarded.
		inplace      True or False. If True, overwrite the DiagDataPoint members
		             in the present DiagData instance. If False, create a new
		             instance.

		Returns:
		filtered_dd  DiagData instance, whose DiagDataPoint elements contain the
		             filtered set of transitions: their transitions attributes
		             (ddp.transitions) are set to a new TransitionsData
		             instance, which is a 'filtered' version of the input. The
		             DiagDataPoint instances are otherwise identical.
		"""
		if len(self) == 0:
			sys.stderr.write("Warning (DiagData.filter_transitions): No data.\n")
			return self
		if all(ddp.transitions is None for ddp in self.data):
			sys.stderr.write("Warning (DiagData.filter_transitions): No transitions data.\n")
			return self

		# Normalize 'energies' to an array with exactly one energy per data point
		nd = len(self)
		if isinstance(energies, (float, np.floating, int, np.integer)):
			energies = np.full(nd, energies)
		elif isinstance(energies, (np.ndarray, list)):
			energies = np.asarray(energies)
			if energies.ndim > 1:
				sys.stderr.write(
					"Warning (DiagData.filter_transitions): Energy array should not have dimension > 1.\n")
				return self
			ne = len(energies)
			if ne > nd:
				# More energies than data points: accept only commensurate
				# interval counts, i.e., (ne - 1) a multiple of (nd - 1), then
				# take every subdiv-th energy value.
				# NOTE (review): assumes nd > 1 here; nd == 1 with ne > 1
				# would divide by zero — confirm callers guarantee this.
				if (ne - 1) % (nd - 1) != 0:
					sys.stderr.write(
						"Warning (DiagData.filter_transitions): Energy array not commensurate with data array.\n")
					return self
				subdiv = (ne - 1) // (nd - 1)
				energies = energies[::subdiv]
			elif ne == 1:
				energies = np.full(nd, energies)
			elif ne != nd:
				sys.stderr.write(
					"Warning (DiagData.filter_transitions): Energy array not commensurate with data array.\n")
				return self
			else:
				pass  # ne == nd: use the energies as given
		else:
			sys.stderr.write("Warning (DiagData.filter_transitions): Invalid input for energies.\n")
			return self

		# Delegate the actual filtering to each DiagDataPoint
		filtered_data = []
		for ee, ddp in zip(energies, self.data):
			filtered_data.append(ddp.filter_transitions(ee, broadening=broadening, ampmin=ampmin, inplace=inplace))
		if inplace:
			self.data = filtered_data
			return self
		else:
			# Shallow copy shares grid etc., but carries the new data list
			new_diagdata = copy.copy(self)
			new_diagdata.data = filtered_data
			return new_diagdata
+
+	def shift_energy(self, delta):
+		"""Shift all energies by an amount delta (in meV)"""
+		if len(self.data) == 0:
+			return
+		for d in self.data:
+			d.eival += delta
+
+	def set_zero_energy(self, delta = 0.0):
+		"""Shift all energies, such that the charge neutrality point is set to zero.
+
+		Argument:
+		delta   Float. If nonzero, set charge neutrality point to this value,
+		        instead of zero.
+
+		Returns:
+		delta_e  The energy shift that accomplishes this. May be None if the
+		         energy shift could not be determined.
+		"""
+		data_k0 = self.get_zero_point()
+		if data_k0 is None:
+			sys.stderr.write("Warning (DiagData.set_zero_energy): Cannot set zero energy, because zero point is absent.\n")
+			return None
+		if data_k0.bindex is None:
+			sys.stderr.write("Warning (DiagData.set_zero_energy): Cannot set zero energy, because band indices are absent.\n")
+			return None
+		e0 = data_k0.get_eival0()
+		if e0 is None:
+			sys.stderr.write("Warning (DiagData.set_zero_energy): Cannot set zero energy, because zero gap is out of range.\n")
+			return None
+		delta_e = delta - e0
+		self.shift_energy(delta_e)
+		return delta_e
+
+	def set_shape(self, shape = None):
+		"""Set shape of data array.
+		The data is always a flat list, but self.shape determines how it needs
+		to be interpreted.
+
+		Argument:
+		shape   Tuple or None. If a tuple, set self.shape to this value. If
+		        None, set shape to (len(self.data),), i.e., interpret the data
+		        as a flat array.
+
+		Note:
+		If the shape is not compatible with the number of data points, then
+		raise an error.
+		"""
+		if shape is None:
+			self.shape = (len(self.data),)
+			self.strides = (1,)
+		elif isinstance(shape, tuple):
+			if np.prod(shape) != len(self.data):
+				raise ValueError("Shape does not match number of data points")
+			self.shape = shape
+			self.strides = tuple(list(np.cumprod(self.shape[::-1]))[-2::-1] + [1])
+		else:
+			raise TypeError("Shape must be a tuple of integers")
+
	def symmetry_test(self, tfm, observables = None, ignore_lower_dim = False, verbose = False):
		"""Do symmetry analysis"""
		# TODO: Documentation after update of function
		# NOTE (review): argument ignore_lower_dim is not used anywhere in this
		# function body — confirm whether it should affect the verdict below.
		# Find pairs
		# Counters for the kinds of matches between each point and its image
		# under the transformation; used for the verdict at the end.
		allsymmetries = {}
		n_kmatches = 0
		n_ematches = 0
		n_zero = 0
		n_self = 0
		match_xmax = 0.0
		match_ymax = 0.0
		match_zmax = 0.0
		n_deg = 0  # number of k points where there are degeneracies
		vector_obs = True  # if True, treat (x, y, z) triples of observable ids as one vector observable

		if isinstance(tfm, str):
			old_to_new = {'x': 'm(x)', 'y': 'm(y)', 'z': 'm(z)', 'xy': '2(z)', 'xyz': 'i'}
			if tfm in old_to_new:
				tfm = old_to_new[tfm]  # translation between old and new
			try:
				tfm = get_vectortransformation(tfm)
			# NOTE (review): bare 'except' also swallows KeyboardInterrupt and
			# SystemExit; consider narrowing to 'except Exception'.
			except:
				sys.stderr.write("ERROR (DiagData.symmetry_test): Unknown transformation '%s'\n" % tfm)
				return None, None
		if not isinstance(tfm, VectorTransformation):
			raise TypeError("Argument tfm must be a VectorTransformation instance or the name of such an instance.")

		# print ("Vector")
		# print (tfm.transform('vector', np.eye(3)))
		# print ("Axial")
		# print (tfm.transform('axial', np.eye(3)))
		# tfminv = tfm.inv()
		# overlaps_min, overlaps_max = 1.0, 0.0

		for j0, d in enumerate(self.data):
			# Look up the image of this momentum under the transformation
			k_tfm = tfm(d.k)
			i2 = self.grid.index(k_tfm, acc = 1e-10)
			if i2 is None:  # no match
				continue
			d2 = self.data[i2]
			# print ("%-25s --> %-25s %-25s" % (d.k, k_tfm, d2.k))
			if d.k == 0.0:  # zero
				n_zero += 1
				continue
			if d2.k == d.k:  # self-match
				n_self += 1
				continue
			n_kmatches += 1
			kx, ky, kz = d.k.xyz()
			match_xmax = max(match_xmax, abs(kx))
			match_ymax = max(match_ymax, abs(ky))
			match_zmax = max(match_zmax, abs(kz))
			# Compare the sorted eigenvalues of the point and its image
			d2.sort_by_eival(inplace = True)
			d1 = d.sort_by_eival(inplace = False)
			if np.amax(np.abs(d1.eival - d2.eival)) > 1e-6:
				# Eigenvalues differ: not a symmetry; disable observable analysis
				allsymmetries = None
				# print (d1.k, d2.k, 'E1 != E2')
				continue
			n_ematches += 1

			# Detect degenerate states
			eidiff0 = np.diff(d1.eival) < 1e-6
			eidiff = np.concatenate(([eidiff0[0]], eidiff0[1:] | eidiff0[:-1], [eidiff0[-1]]))
			if np.count_nonzero(eidiff) > 0:
				n_deg += 1
				# print (d1.k, d2.k, 'n_deg:', np.count_nonzero(eidiff))
			if observables is False or allsymmetries is None:
				continue
			for obs in d1.obsids:
				observables1 = d1.obsids if observables is None or observables is True else observables
				if obs not in observables1 and obs not in d2.obsids:
					# print ("Observable %s not in mirror image" % obs)
					continue
				symmetries = []
				if vector_obs and obs.endswith('x') and obs[:-1] + 'y' in d1.obsids and obs[:-1] + 'z' in d1.obsids:
					# vector observable
					obsxyz = obs, obs[:-1] + 'y', obs[:-1] + 'z'
					if obsxyz[1] not in d2.obsids or obsxyz[2] not in d2.obsids:
						continue
					obs = obs[:-1] + "(x,y,z)"
					obsval1 = np.real([d1.get_observable(o) for o in obsxyz])
					obsval2 = np.real([d2.get_observable(o) for o in obsxyz])
					# For degenerate states, set observable values to 0
					obsval1[:, eidiff] = 0.0
					obsval2[:, eidiff] = 0.0

					if np.amax(np.abs(obsval1)) < 1e-6 and np.amax(np.abs(obsval2)) < 1e-6:
						symmetries.append("zero")
					# if np.amax(np.abs(obsval2 - obsval1)) < 1e-6:
					# 	symmetries.append("symmetric")
					# if np.amax(np.abs(obsval2 + obsval1)) < 1e-6:
					# 	symmetries.append("antisymmetric")
					# Test vector representations: transformed value must match the image
					for rep in ['t1g', 't1u', 't2g', 't2u']:
						v_obsval1 = tfm.transform(rep, obsval1)
						if np.amax(np.abs(v_obsval1 - obsval2)) < 1e-6:
							symmetries.append(rep)

					if verbose and len(symmetries) == 0 and (obs not in allsymmetries or len(allsymmetries[obs]) != 0):
						print(d1.k, d2.k, "Not symmetric")
						print(obsval1[:, 0], 'o1')
						for rep in ['t1g', 't1u', 't2g', 't2u']:
							v_obsval1 = tfm.transform(rep, obsval1)
							print(v_obsval1[:, 0], 'V o1 (%s)' % rep)
						print(obsval2[:, 0], 'o2')

				elif vector_obs and (obs.endswith('y') or obs.endswith('z')) and obs[:-1] + 'x' in d1.obsids:
					# Skip y, z components of a vector
					continue
				else:
					# Scalar observable

					obsval1 = d1.get_observable(obs)
					obsval2 = d2.get_observable(obs)
					# For degenerate states, set observable values to 0
					obsval1[eidiff] = 0.0
					obsval2[eidiff] = 0.0

					if np.amax(np.abs(obsval1)) < 1e-6 and np.amax(np.abs(obsval2)) < 1e-6:
						symmetries.append("zero")
					# if np.amax(np.abs(obsval1 - obsval2)) < 1e-6: # and isreal:
					# 	symmetries.append("symmetric")
					# if np.amax(np.abs(obsval1 + obsval2)) < 1e-6: # and isreal:
					# 	symmetries.append("antisymmetric")
					# Test scalar representations: transformed value must match the image
					for rep in ['a1g', 'a1u', 'a2g', 'a2u']:
						v_obsval1 = tfm.transform(rep, obsval1)
						if np.amax(np.abs(v_obsval1 - obsval2)) < 1e-6:
							symmetries.append(rep)

				if obs not in allsymmetries:
					allsymmetries[obs] = symmetries
				else:
					# Intersection of existing list and current list of symmetries
					compatible_symmetries = [symm for symm in symmetries if symm in allsymmetries[obs]]
					allsymmetries[obs] = compatible_symmetries

		# Evaluate matches (points with matching energies versus valid momentum pairs)
		n_matches = n_ematches + n_zero + n_self
		if verbose:
			print("matches: %i/%i (k,e) + %i (0) + %i (s) = %i/%i" % (n_kmatches, n_ematches, n_zero, n_self, n_kmatches + n_zero + n_self, n_matches))

		# Sizes of all lower-dimensional subgrids; used to recognize matches
		# confined to a subgrid of the full grid
		grid_dim = len(self.grid.shape)
		lowdim_sizes = []
		for d in range(1, grid_dim):
			subshapes = self.grid.subgrid_shapes(d)
			lowdim_sizes += [np.prod(s) for s in subshapes]

		e_match = False
		if n_matches == len(self):
			print("Full match")
			e_match = True
		elif n_matches == 0:
			print("No match")
		elif n_ematches == 0 and n_zero > 0:
			print("Match only at zero")
		elif n_ematches > 0 and n_matches in lowdim_sizes:
			print("Match on lower-dimensional subgrid")
		elif n_ematches == n_kmatches and (lowdim_sizes == [] or n_matches >= max(lowdim_sizes)):
			print("Full match on grid overlap (large region)")
			e_match = True
		elif n_ematches == n_kmatches:
			print("Full match on grid overlap (small region)")
		else:
			print("Insufficient match")
		print()

		if observables is False:
			return e_match, None

		if not e_match or allsymmetries is None:
			# print ("Not a Hamiltonian symmetry (eigenvalues not symmetric)")
			# print()
			return e_match, None

		print("Observable representations:")

		# Capitalize representation labels for printing
		for obs in allsymmetries:
			allsymmetries[obs] = [rep[:1].upper() + rep[1:] if isinstance(rep, str) and len(rep) > 0 else rep for rep in allsymmetries[obs]]

		# Align the output on the longest observable id
		olen = 0
		for obs in allsymmetries:
			olen = max(olen, len(obs))
		fmt = "%%-%is: %%s" % olen
		for obs in sorted(allsymmetries):
			if len(allsymmetries[obs]) == 0:
				symm = '???'
			elif len(allsymmetries[obs]) == 1:
				symm = allsymmetries[obs][0]
			elif 'Zero' in allsymmetries[obs]:
				symm = 'Zero'
			else:
				symm = ", ".join(allsymmetries[obs])
			print(fmt % (obs, symm))
		if n_deg > 0:
			print("Degenerate states were ignored at %i (pairs of) points" % n_deg)
		print()
		return e_match, allsymmetries
+
	def symmetrize(self, axis = None, copy_eivec = True):
		"""Symmetrize the data: Use mirror symmetries to extend the data set to a larger domain.

		Argument:
		axis        String. Which mirror symmetry to consider. If None, derive
		            it from the vector type of the grid.
		copy_eivec  True or False. If True, copy eigenvectors in the source data
		            point to the target, only if the source and target point are
		            equal in momentum. If False, discard eigenvectors for all
		            points, also the ones in the original grid.

		Returns:
		A new DiagData instance. However, if nothing has been done, return the
		present DiagData instance.

		"""
		if self.grid is None:
			sys.stderr.write("ERROR (DiagData.symmetrize): Cannot symmetrize data without a VectorGrid.\n")  # TODO
			return self
		if axis is None:
			# Derive the mirror axis (or axes) from the vector type of the grid
			if self.grid.vtype in ['x', 'y', 'z']:
				axis = self.grid.vtype
			elif self.grid.vtype in ['xy', 'pol']:
				axis = 'xy'
			elif self.grid.vtype in ['cyl', 'sph']:
				axis = 'xyz'
			else:
				raise ValueError("Invalid vector type")
		# Candidate reflections to try when matching new points to old ones
		transformations = [axis] if axis in ['x', 'y', 'z'] else ['x', 'y', 'xy'] if axis == 'xy' else ['x', 'y', 'z', 'xy', 'xz', 'yz', 'xyz']

		newgrid, gridmap = self.grid.symmetrize(axis)
		if newgrid is None:
			sys.stderr.write("ERROR (DiagData.symmetrize): Symmetrization of VectorGrid has failed.\n")
			return self

		# Define shapes of arrays with explicit 1s for constant components
		oldfullshape = tuple([len(self.grid.get_array(co)) for co in self.grid.get_components()])
		newfullshape = tuple([len(newgrid.get_array(co)) for co in newgrid.get_components()])
		if newfullshape == oldfullshape:
			sys.stderr.write("Warning (DiagData.symmetrize): Data is already symmetric.\n")
			return self

		if 'verbose' in sys.argv:
			print("Symmetrization [components (%s)]:" % ", ".join(newgrid.get_components()), oldfullshape, '-->', newfullshape)
		gridmaparray = tuple([gridmap[co] for co in newgrid.get_components()])

		newdata = []
		for newflatidx in range(0, len(newgrid)):
			# Convert sequential (flat) index to multi-index:
			newfullidx = np.unravel_index(newflatidx, newfullshape)
			# Map multi-index of new grid to multi-index of old grid:
			oldfullidx = tuple([gmap[idx] for gmap, idx in zip(gridmaparray, newfullidx)])
			# Convert multi-index of old grid to sequential (flat) index:
			oldflatidx = np.ravel_multi_index(oldfullidx, oldfullshape)

			newk = newgrid[newflatidx]
			oldk = self.grid[oldflatidx]
			old_ddp = self.data[oldflatidx]
			# Determine how the source and target momentum values are related,
			# i.e., find a transformation T so that k_new = T k_old.
			if newk.equal(oldk, 1e-6):
				transformation = '1'
			else:
				transformation = None
				for tfm in transformations:
					if newk == oldk.reflect(tfm):
						transformation = tfm
						break

			if 'verbose' in sys.argv:
				print(newfullidx, newflatidx, newk, '<--', oldfullidx, oldflatidx, oldk, 'T =', transformation)

			# New data point with the same eigenvalues; eigenvectors are only
			# copied back (below) for identity transformations
			ddp = DiagDataPoint(newk, old_ddp.eival, eivec = None)
			if old_ddp.bindex is not None:
				ddp.set_bindex(old_ddp.bindex, aligned_with_e0=old_ddp.aligned_with_e0)
			if old_ddp.llindex is not None:
				ddp.set_llindex(old_ddp.llindex)
			if old_ddp.char is not None:
				ddp.set_char(old_ddp.char)
			# NOTE (review): assumes old_ddp.obsvals is array-like (2D);
			# confirm it cannot be None at this point.
			newobsvals = np.zeros_like(old_ddp.obsvals)

			if transformation is None:
				sys.stderr.write("Warning (DiagData.symmetrize): New data point not related to an existing one.\n")
				ddp.set_observables(float("nan") * old_ddp.obsvals, old_ddp.obsids)
				newdata.append(ddp)
				continue

			if transformation == '1':
				ddp.set_observables(1.0 * old_ddp.obsvals, old_ddp.obsids)
				# copy eigenvectors only if the source and target point are equal in momentum
				if copy_eivec and old_ddp.eivec is not None:
					ddp.eivec = old_ddp.eivec
				newdata.append(ddp)
				continue

			# Flip the sign of observables that are odd under the transformation
			for j, o in enumerate(old_ddp.obsids):
				if o in ['jx', 'sx'] and transformation in ['x', 'y', 'z', 'xy']:
					newobsvals[j, :] = -1.0 * old_ddp.obsvals[j]
				elif o in ['jy', 'sy'] and transformation in ['x', 'y', 'z', 'xy']:
					newobsvals[j, :] = -1.0 * old_ddp.obsvals[j]
				elif o in ['y', 'ysz', 'yjz'] and transformation == 'x':
					newobsvals[j, :] = -1.0 * old_ddp.obsvals[j]
				else:
					newobsvals[j, :] = 1.0 * old_ddp.obsvals[j]
					# Note: this also silently treats non-symmetric or undefined observables
					# TODO: Raise a proper warning or error
			ddp.set_observables(newobsvals, old_ddp.obsids)
			newdata.append(ddp)
		return DiagData(newdata, grid = newgrid)
+
+	def get_cnp(self):
+		"""Find charge neutral point at each k/B-value (using universal band indices)."""
+		return np.asarray([ddp.get_eival0() for ddp in self.data])
+
+	## Forward of 'list-like' functions
+	def __len__(self):
+		return len(self.data)
+
+	def index(self, x):
+		return self.data.index(x)
+
+	def __iter__(self):
+		return iter(self.data)
+
+	def __getitem__(self, i):
+		if isinstance(i, (int, np.integer)):
+			return self.data[i]
+		elif isinstance(i, tuple):
+			if len(i) != len(self.shape):
+				raise ValueError("Invalid index depth")
+			idx = np.sum([ii * ss for ii, ss in zip(i, self.strides)])
+			return self.data[idx]
+
+	def get_flatindices(self, indices):
+		"""Get indices of the flat data array based on (numpy style) array index"""
+		return np.arange(0, len(self.data)).reshape(self.shape)[indices]
+
+	def get_subset(self, indices):
+		"""Get subset of DiagData from (numpy style) array index
+
+		Arguments:
+		indices    Tuple of integers and slice objects. A numpy style array
+		           index.
+
+		Returns:
+		diagdata   DiagData instance. A new instance with the subset array.
+		"""
+		flatindices = self.get_flatindices(indices)
+		new_data = [self.data[i] for i in flatindices.flatten()]
+		if self.grid is not None:
+			new_grid = self.grid.get_subset(indices)
+			new_shape = None
+			new_gridvar = None
+		else:
+			new_grid = None
+			new_shape = flatindices.shape
+			new_gridvar = self.gridvar
+		return DiagData(new_data, shape=new_shape, grid=new_grid, gridvar=new_gridvar)
+
+	def append(self, data, strictmatch = False):
+		"""Append this DiagData instance with a new DiagDataPoint instance.
+		If there is already a DiagDataPoint instance at the same momentum and
+		parameter value (magnetic field), then extend the existing data point
+		with states from the new DiagDataPoint instance.
+
+		Arguments:
+		data         DiagDataPoint instance. Data to be added.
+		strictmatch  True or False. If True, check vector values for identity.
+		             If False, check for equality.
+
+		Returns:
+		The present DiagData instance
+
+		Note:
+		By adding one point, the data can no longer be represented as a
+		multi-dimensional grid. Instead, the shape is set to a flat array and
+		the grid attribute is set to None.
+		"""
+		if isinstance(data, DiagDataPoint):
+			ddp = self.find(data.k, data.paramval, strictmatch = strictmatch)
+			if ddp is None:
+				self.data.append(data)
+			else:
+				ddp.extend(data)
+		else:
+			raise TypeError("Input should be a DiagDataPoint.")
+		self.set_shape()  # reset shape to be 1D and reset grid
+		self.grid = None
+		self.gridvar = ''
+		return self
+
+	def extend(self, data):
+		"""Extend this DiagData instance with multiple new data points.
+
+		Argument:
+		data   A list of DiagDataPoint instances or a DiagData instance. Data
+		       points to be added.
+
+		Returns:
+		The present DiagData instance
+
+		Note:
+		By adding multiple points, the data can no longer guaranteed to be
+		representable as a multi-dimensional grid. Instead, the shape is set to
+		a flat array and the grid attribute is set to None.
+		"""
+		if isinstance(data, list):
+			for d in data:
+				if not isinstance(d, DiagDataPoint):
+					raise TypeError("List elements should be DiagDataPoint instances.")
+			self.data.extend(data)
+		elif isinstance(data, DiagData):
+			self.data.extend(data.data)
+		else:
+			raise TypeError("Input should be a list of DiagDataPoints or a DiagData instance.")
+		self.set_shape()  # reset shape to be 1D and reset grid
+		self.grid = None
+		self.gridvar = ''
+		return self
+
+	def __add__(self, other):
+		"""Add data by extending the data list.
+
+		Argument:
+		other   List of DiagDataPoint instances, an single DiagDataPoint
+		        instance, or a DiagData instance.
+
+		Returns:
+		A new DiagData instance.
+		"""
+		if isinstance(other, list):
+			return DiagData(self.data + other)
+		elif isinstance(other, DiagData):
+			return DiagData(self.data + other.data)
+		elif isinstance(other, DiagDataPoint):
+			return DiagData(self.data + [other])
+		else:
+			raise TypeError("Right operand should be a list, DiagData, or DiagDataPoint instance.")
+
+	def __radd__(self, other):
+		"""Reverse add. See DiagDataPoint.__add__() for more details."""
+		if isinstance(other, list):
+			return DiagData(other + self.data)
+		elif isinstance(other, DiagData):
+			return DiagData(other.data + self.data)
+		elif isinstance(other, DiagDataPoint):
+			return DiagData([other] + self.data)
+		else:
+			raise TypeError("Left operand should be a list, DiagData, or DiagDataPoint instance.")
+
+	def __iadd__(self, other):
+		"""In-place add. See DiagDataPoint.__add__() for more details."""
+		if isinstance(other, (list, DiagData)):
+			return self.extend(other)
+		elif isinstance(other, DiagDataPoint):
+			return self.append(other)
+		else:
+			raise TypeError("Right operand should be a list, DiagData, or DiagDataPoint instance.")
+
+	def interpolate(self, subdiv = 1, obs = False):
+		"""Interpolate eigenenergies to positions between the existing momentum/parameter values.
+
+		Arguments:
+		subdiv   Integer. Number of subdivisions, as in
+		         step_new = step_old / subdiv.
+		obs      True or False. Whether to interpolate values of observables
+		         too.
+
+		Returns:
+		A new DiagData instance. However, if nothing had to be done, return the
+		present DiagData instance.
+		"""
+		if subdiv == 1:
+			return self
+		elif not isinstance(subdiv, (int, np.integer)):
+			raise TypeError("Number of subdivisions must be a positive integer")
+		elif subdiv < 1:
+			raise ValueError("Number of subdivisions must be a positive integer")
+		if len(self.shape) != 1 or (self.grid is not None and len(self.grid.shape) != 1):
+			raise ValueError("Data must be one-dimensional")
+		if len(self) <= 1:
+			sys.stderr.write("ERROR (Interpolated_diagdata): Insufficient data.\n")
+			return self
+
+		bandlabels = self.get_all_bindex()
+		if bandlabels is None:
+			sys.stderr.write("ERROR (Interpolated_diagdata): Cannot interpolate if the band labels are not defined.\n")
+			return self
+		obsids = None if not observables else self.data[0].obsids
+		energies_ip = {}
+		obsvals_ip = {}
+		for lb in bandlabels:
+			_, energies = self.get_plot_coord(lb, "index")
+			energies1 = np.array([(1. - j / subdiv) * np.asarray(energies)[:-1] + (j / subdiv) * np.asarray(energies)[1:] for j in range(0, subdiv)])
+			energies_ip[lb] = np.concatenate((np.hstack(energies1.transpose()), np.asarray(energies)[-1:]), axis=0)
+			if obs:
+				obsvals = self.get_observable_by_bindex(obs = None, b = lb)
+				obsvals1 = np.array([(1. - j / subdiv) * obsvals[:, :-1] + (j / subdiv) * obsvals[:, 1:] for j in range(0, subdiv)])
+				obsvals_ip[lb] = np.concatenate((np.hstack(np.transpose(obsvals1, (2, 1, 0))), np.asarray(obsvals)[:, -1:]), axis=1)
+
+		lold = len(self.data)
+		lnew = (lold - 1) * subdiv + 1
+		if self.grid is not None:
+			newgrid = self.grid.subdivide(None, subdiv)
+			newdata = [None for j in range(0, lnew)]
+			for j in range(0, lnew):
+				if self.gridvar == 'k':
+					newparamval = None
+					newk = newgrid[j]
+				else:
+					newparamval = newgrid[j]
+					newk = self.data[j // subdiv].k  # floor function; TODO: Interpolate ??
+				eival = np.array([energies_ip[lb][j] for lb in bandlabels])
+				eisel = ~np.isnan(eival)
+				bidx = [lb for lb, is_ok in zip(bandlabels, eisel) if is_ok]
+				ddp = DiagDataPoint(newk, eival[eisel], None, paramval = newparamval)
+				if isinstance(bidx[0], (int, np.integer)):
+					ddp.set_bindex(bidx, aligned_with_e0=self.aligned_with_e0)
+				else:
+					ddp.set_llindex([lb[0] for lb in bidx])
+					ddp.set_bindex([lb[1] for lb in bidx], aligned_with_e0=self.aligned_with_e0)
+				if obs:
+					obsvals = np.array([obsvals_ip[lb][:, j] for lb in bandlabels])
+					ddp.set_observables(obsvals[eisel].transpose(), obsids = obsids)
+				newdata[j] = ddp
+
+			return DiagData(newdata, grid = newgrid)
+		else:
+			raise NotImplementedError("Not yet implemented for DiagData without VectorGrid instance")
+
+	def to_binary_file(self, filename):
+		"""Save data to a numpy binary (npz) file.
+		For each DiagDataPoint instance, save the fields (member variables)
+		specified in global variable binfile_ddp_fields as a separate array in
+		the file. Also save arrays of the VectorGrid (momentum and/or parameter
+		values).
+
+		For Numpy format: The file is a compressed npz file with a collection of
+		numpy arrays. For more information on the file format, consult:
+		https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
+
+		For HDF5 format: The file is a HDF5 container and the data is saved in a
+		separate group for each DiagDataPoint. The values for k and b are stored
+		as attributes. We do not use compression because it would reduce the
+		file size only minimally. See also:	https://docs.h5py.org
+
+		Argument:
+		filename   String. The file name. The output type is extracted from the
+		           file name extension.
+
+		No return value
+		"""
+		# TODO: Update with HDF5 and rename to to_binary_file() or remove (it is
+		# currently not used).
+		## If empty, do not write anything
+		if len(self.data) == 0:
+			sys.stderr.write("Warning (DiagData.to_npz): Empty DiagData, nothing to write.\n")
+			return
+
+		## Check file extension
+		ext = filename.split('.')[-1]
+
+		## Do a check of fields on first data point
+		for field in binfile_ddp_fields:
+			if field not in dir(self.data[0]):
+				raise AttributeError("Field %s is not a valid member of DiagDataPoint class." % field)
+
+		## Gather data from DiagDataPoint instances
+		ddp_data = {}
+		sep = '/' if ext in ['h5', 'hdf5'] else '.'  # label separator (. for npz, / for hdf5)
+		for j, ddp in enumerate(self.data):
+			label = 'ddp%i' % j
+			for field in binfile_ddp_fields:
+				if isinstance(getattr(ddp, field), np.ndarray):  # also excludes None
+					ddp_data[label + sep + field] = getattr(ddp, field)
+
+		## Gather data from Vector grid or (if grid is not set) the 'x values'
+		grid_data = {}
+		xval = self.get_xval()
+		if self.grid is not None:
+			grid_arrays = self.grid.get_grid(comp = 'all')
+			grid_comp = self.grid.get_components(include_prefix = True)
+			for co, arr in zip(grid_comp, grid_arrays):
+				grid_data[co] = arr
+		elif all([isinstance(x, Vector) for x in xval]):
+			vtype = xval[0].vtype
+			if all([x.vtype == vtype for x in xval]):
+				comp = xval[0].components(prefix = self.gridvar)
+				for co in comp:
+					arr = np.array([x.component(co, prefix = self.gridvar) for x in xval])
+					grid_data[co] = arr
+			else:
+				sys.stderr.write("Warning (DiagData.to_npz): Vectors (xval) do not have uniform vtype.\n")
+		elif all([isinstance(x, (tuple, list)) for x in xval]):
+			l = len(xval[0])
+			if all([len(x) == l for x in xval]):
+				co = self.gridvar if isinstance(self.gridvar, str) and len(self.gridvar) > 0 else 'x'
+				grid_data[co] = np.array(xval).transpose()
+			else:
+				sys.stderr.write("Warning (DiagData.to_npz): x values do not have uniform length.\n")
+		elif all([isinstance(x, (float, complex, np.floating, np.complex_)) for x in xval]):
+			co = self.gridvar if isinstance(self.gridvar, str) and len(self.gridvar) > 0 else 'x'
+			grid_data[co] = np.array(xval)
+		else:
+			sys.stderr.write("Warning (DiagData.to_npz): Data type of x values is invalid or not uniform.\n")
+
+		## Save file
+		if ext == 'npz':
+			try:
+				np.savez_compressed(filename, **grid_data, **ddp_data)
+			except:
+				sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Failed to write to Numpy binary file '%s'\n" % filename)
+		elif ext in ['h5', 'hdf5']:
+			try:
+				hdf5o.create(filename)
+				hdf5o.append_retry(filename, 'grid', data = grid_data)
+				hdf5o.append_retry(filename, 'diagdata', data = ddp_data)
+			except:
+				sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Failed to write to HDF5 binary file '%s'\n" % filename)
+				raise
+		else:
+			sys.stderr.write("ERROR (DiagDataPoint.to_binary_file): Unknown file type/extension '%s'\n" % ext)
+
+
+	def diagonalize(self, model, solver, opts_list = None):
+		"""Start diagonalization for all DiagDataPoints of this DiagData instance.
+
+		Arguments:
+		model	    ModelBase (or children) instance. Defines the calculation
+		            model and functions to be called.
+		solver	    DiagSolver instance. Contains the process/thread
+		            information for progress calculation.
+		opts_list   Dictionary of list options, i.e., options specific per
+		            DiagDataPoint.
+		"""
+		if opts_list is None:
+			opts_list = {}
+		save_ddp = get_config('diag_save_binary_ddp')
+		if save_ddp in ['hdf5', 'h5']:
+			hdf5o.create("ddps.h5")  # initialize HDF5 file if needed
+		task_manager = TaskManager(handle_sigchld=solver.handle_sigchld)
+		progress = Progress('Main diagonalization', len(self), solver.num_processes)
+		for j, ddp in enumerate(self):
+			ddp.opts.update(dict_plus_array_dict({}, opts_list, j))
+			model.enqueue_task(ddp, task_manager, progress)
+		with task_manager as tm:
+			tm.do_all()
+		gc.collect()
+		return self
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagonalization.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagonalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..72fc7e2b36f2e3a38f95053b70b31413dfe024d6
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagonalization.py
@@ -0,0 +1,1150 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+from time import time as rtime
+from scipy.linalg import eigh
+from scipy.sparse.linalg import eigsh
+
+from ..physconst import eoverhbar
+from ..config import get_config_num
+from ..symbolic import SymbolicHamiltonian
+from ..momentum import Vector, VectorGrid
+from ..parallel import show_job_monitor, job_monitor_k_b
+from .. import hamiltonian as hm
+from ..bandtools import band_types
+from ..lltools import delta_n_ll, scaleup_eivec, scaleup_full_eivec, whichbands_ll
+from ..transitions import get_transitions, get_transitions_full
+from ..berry import berrycurv_k, chernnumber_ll, chernnumber_ll_full
+from .. import intervaltools
+
+from .diagdata import DiagDataPoint
+
+# TODO: Remove obsolete arguments and put them in a more logical order.
+
+### FOR JOB MONITOR ###
def magn_monitor(magn):
	"""Auxiliary function for job monitor: format a magnetic field value.

	Vector instances use their own string representation; plain numbers are
	formatted with %g.
	"""
	fmt = "%s" if isinstance(magn, Vector) else "%g"
	return fmt % magn
+
+### 2D DIAGONALIZATION ROUTINES ###
def hz(
		k_b, params, energy = 0.0, neig = 50, lattice_reg = False, split = 0.0,
		splittype = 'auto', ignorestrain = False, obs = None, pot = None,
		axial = True, overlap_eivec = None, return_eivec = None, berry = False,
		obs_prop = None, bia = False, ignore_magnxy = False, wflocations = None,
		solver = None, **ignored_opts):
	"""Diagonalization for 2D geometry for one value of momentum and magnetic field.

	Arguments:
	k_b            Vector or float, or 2-tuple of those. Momentum, or momentum
	               and magnetic field.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	pot            Array. Potential V(z) in meV as function of position.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	overlap_eivec  A dict, whose keys are the band labels (characters) and
	               values are the eigenvectors for which overlaps can be
	               calculated with the eigenvectors of this Hamiltonian.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	berry          2-tuple, True or False. If a 2-tuple of integers, calculate
	               Berry curvature for bands with indices in this range. If
	               True, calculate Berry curvature for all states. If False, do
	               not calculate Berry curvature.
	obs_prop       ObservableList instance containing all observable properties.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	wflocations    List, array, or VectorGrid instance. Contains the magnetic
	               field values where wave functions should be saved (plot and
	               table). None if no wave functions should be saved.
	ignore_magnxy  True or False. If True, neglect the in-plane components of
	               the orbital part of the magnetic field. Only for legacy
	               reasons, e.g., comparing with results that were calculated
	               when these terms were not yet implemented.
	solver		   DiagSolver instance

	Returns:
	A DiagDataPoint instance.
	"""
	# Unpack momentum and magnetic field; a bare momentum implies zero field.
	if isinstance(k_b, tuple):
		k, b = k_b
	else:
		k, b = k_b, Vector(0.0, astype = "z")
	if isinstance(k, Vector):
		kx, ky = k.xy()
	else:
		raise TypeError("Argument k must be a Vector instance")
	t0 = rtime()
	# Stage C: construction of the Hamiltonian matrix
	show_job_monitor("%s  C start" % job_monitor_k_b(k, b))
	# Extra k terms are needed only for a transformed lattice
	kterms = hm.h_kterms(params, axial = axial) if params.lattice_transformed() else None
	ham = hm.hz_sparse([kx, ky], b, params, solver = solver, lattice_reg = lattice_reg, ignorestrain = ignorestrain, axial = axial, bia = bia, ignore_magnxy = ignore_magnxy, kterms = kterms)
	# Optional degeneracy-lifting term, scaled by 'split'
	if split != 0.0:
		hamsplit = split * hm.hsplit_full(params, splittype, k = [kx, ky], bia = bia, lattice_reg = lattice_reg)
		ham += hamsplit
	# Optional potential V(z)
	if pot is not None:
		hpot = hm.hz_sparse_pot(params, pot)
		ham += hpot
	show_job_monitor("%s  C done (%f s)" % (job_monitor_k_b(k, b), rtime() - t0))

	t0 = rtime()
	# Stage D: diagonalization (shift-and-invert around 'energy')
	show_job_monitor("%s  D start" % job_monitor_k_b(k, b))
	if solver is not None:
		eival, eivec = solver.solve(ham)
	else:
		eival, eivec = eigsh(ham, neig, sigma = energy)  # Fallback to eigsh, if no solver configured
	show_job_monitor("%s  D done (%f s)" % (job_monitor_k_b(k, b), rtime() - t0))

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(k, eival, eivec, paramval = b).calculate_observables(params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec, magn = b)
	if berry:
		# Berry curvature by numerical differentiation with momentum step berry_dk
		berry_dk = get_config_num('berry_dk', minval = 0)
		if berry_dk == 0:
			sys.stderr.write("ERROR (diagonalization.hz): Berry curvature momentum step must be a positive number.\n")
			raise ValueError
		# A tuple restricts the calculation to a band-index range
		which = berry if isinstance(berry, tuple) else None
		bc_val, bc_ei, _ = berrycurv_k(ddp, hm.hz_sparse_split, params, dk = berry_dk, which = which, lattice_reg = lattice_reg, split = split, ignorestrain = ignorestrain, axial = axial)
		ddp.set_observable_value('berry', bc_ei, np.asarray(bc_val))
		# 'berryiso': Berry curvature weighted by isoparity (isopz)
		ibc_val = ddp.get_observable('berry') * ddp.get_observable('isopz')
		ddp.set_observable_value('berryiso', np.arange(0, ddp.neig), ibc_val)

	# Wave functions
	# Force keeping the eigenvectors if this momentum matches one of the
	# requested wave-function locations (Vector or numeric, tolerance 1e-9).
	if isinstance(wflocations, (list, np.ndarray, VectorGrid)):
		k_numeric = k.len()
		for j, wfloc in enumerate(wflocations):
			if isinstance(wfloc, Vector) and wfloc - k < 1e-9:
				return_eivec = True
				break
			elif isinstance(wfloc, (int, float, np.integer, np.floating)) and np.abs(wfloc - k_numeric) < 1e-9:
				return_eivec = True
				break

	# Default: keep eigenvectors only if no observables were requested
	if return_eivec is None:
		return_eivec = (obs is None or obs == [])
	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
def hz_k0(
	params, energy = 0.0, neig = 50, lattice_reg = False, split = 0.0,
	splittype = 'auto', ignorestrain = False, axial = True,
	bia = False, pot = None, return_eivec = False, k0 = 0,
	b0 = 0, solver = None, bandtype_warning_level = 1, **ignored_opts):
	"""Diagonalization for 2D geometry at zero momentum and magnetic field.
	Also calculate band characters. This data may help to determine the band
	indices at zero.

	Arguments:
	params         PhysParams instance.
	energy         Float or list of floats. Target energy (or energies) of the
	               shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	pot            Array. Potential V(z) in meV as function of position.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	k0             Vector, float, or None. Momentum value. If None, zero
	               momentum.
	b0             Vector, float, or None. Magnetic field value. If None, zero
	               magnetic field.
	solver		   DiagSolver instance
	bandtype_warning_level
	               0, 1, 2. Whether to show no, some, or all warnings from the
	               band_types function.

	Returns:
	A DiagDataPoint instance.
	"""
	if k0 is None or k0 == 0:
		k0 = Vector(0.0, 0.0, astype = 'xy')
		kx, ky = 0.0, 0.0
	elif isinstance(k0, Vector):
		kx, ky = k0.xy()
	else:
		raise TypeError("Argument k0 must be a Vector instance or None")
	# Bug fix: previously any nonzero b0 (even a Vector) raised a TypeError
	# whose message referred to k0, making the B != 0 estimation branch below
	# unreachable. Accept Vector and plain numbers; wrap numbers in a Vector.
	if b0 is None or b0 == 0:
		b0 = Vector(0.0, astype = 'z')
	elif isinstance(b0, (float, int, np.floating, np.integer)):
		b0 = Vector(float(b0), astype = 'z')
	elif not isinstance(b0, Vector):
		raise TypeError("Argument b0 must be a Vector instance, a number, or None")
	if isinstance(energy, list) and len(energy) == 1:
		energy = energy[0]

	t0 = rtime()
	show_job_monitor("%6.3f  C start" % 0)
	kterms = hm.h_kterms(params, axial = axial, verbose = 'verbose' in sys.argv) if params.lattice_transformed() else None

	# Main Hamiltonian at the requested field b0 (bug fix: was hard-coded 0.0,
	# which contradicted the B != 0 estimation logic below).
	ham = hm.hz_sparse(
		[kx, ky], b0, params, solver = solver, lattice_reg = lattice_reg,
		ignorestrain = ignorestrain, axial = axial, bia = bia, kterms = kterms
	)
	if split != 0.0:
		hamsplit = split * hm.hsplit_full(params, splittype, k = [kx, ky], kdim = 2, bia = bia, lattice_reg = lattice_reg)
		ham += hamsplit
	if pot is not None:
		hpot = hm.hz_sparse_pot(params, pot)
		ham += hpot
	show_job_monitor("%6.3f  C done (%f s)" % (0, rtime() - t0))

	t0 = rtime()
	show_job_monitor("%6.3f  D start" % 0)
	if solver is not None:
		neig_old = solver.neig
		solver.neig = neig
		eival, eivec = solver.solve(ham)
		solver.neig = neig_old
	elif isinstance(energy, list):  # Fallback to eigsh (multiple targetenergy), if no solver configured
		eival, eivec = eigsh(ham, neig, sigma=energy[0])
		temp_ddp = DiagDataPoint(0, eival, eivec)
		intervals = [intervaltools.from_eivals(eival, energy[0])]
		for e in energy[1:]:
			eival, eivec = eigsh(ham, neig, sigma=e)
			temp_ddp.extend_by(0, eival, eivec)
			intervals.append(intervaltools.from_eivals(eival, e))
		intervals = intervaltools.normalize(intervals)
		if len(intervals) > 1:
			sys.stderr.write("ERROR (diagonalization.hz_k0): Disconnected eigenvalue ranges: " + ", ".join(["[%.3f, %.3f]" % i for i in intervals]) + ".\n")
			exit(1)
		eival, eivec = temp_ddp.eival, temp_ddp.eivec
	else:  # Fallback to eigsh, if no solver configured
		eival, eivec = eigsh(ham, neig, sigma=energy)
	show_job_monitor("%6.3f  D done (%f s)" % (0, rtime() - t0))
	neig = len(eival)

	if b0 == 0.0 and not bia:
		# Band characters are exact at B = 0 without BIA
		bandtypes = band_types(params, eivec, warning_level = bandtype_warning_level, k = k0)
	else:
		# Repeat calculation without magnetic field and without BIA as a
		# reference, then estimate characters via eigenvector overlaps.
		t0 = rtime()  # bug fix: timer was not reset, so 'C done' showed a stale time
		show_job_monitor("%6.3f  C start" % 0)
		# Bug fix: the reference must be at zero field (was b0)
		ham = hm.hz_sparse(
			[kx, ky], 0.0, params, solver = solver, lattice_reg = lattice_reg,
			ignorestrain = ignorestrain, axial = axial, bia = False, kterms = kterms
		)
		if split != 0.0:
			hamsplit = split * hm.hsplit_full(params, splittype, k = [kx, ky], kdim = 2, bia = False, lattice_reg = lattice_reg)
			ham += hamsplit
		if pot is not None:
			hpot = hm.hz_sparse_pot(params, pot)
			ham += hpot
		show_job_monitor("%6.3f  C done (%f s)" % (0, rtime() - t0))

		t0 = rtime()
		show_job_monitor("%6.3f  D start" % 0)
		if solver is not None:
			neig_old = solver.neig
			# We need a few extra eigenvalues here to make it work, 5 seems to be a good choice. TODO: Find out why.
			solver.neig += 5
			eival0, eivec0 = solver.solve(ham)
			solver.neig = neig_old
		elif isinstance(energy, list):  # Fallback to eigsh (multiple targetenergy), if no solver configured
			eival0, eivec0 = eigsh(ham, neig + 5, sigma=energy[0])
			temp_ddp = DiagDataPoint(0, eival0, eivec0)
			for e in energy[1:]:
				eival0, eivec0 = eigsh(ham, neig + 5, sigma=e)
				temp_ddp.extend_by(0, eival0, eivec0)
			eival0, eivec0 = temp_ddp.eival, temp_ddp.eivec
		else:  # Fallback to eigsh (single targetenergy), if no solver configured
			eival0, eivec0 = eigsh(ham, neig + 5, sigma=energy)
		show_job_monitor("%6.3f  D done (%f s)" % (0, rtime() - t0))
		bandtypes0 = band_types(params, eivec0, warning_level = bandtype_warning_level, k = k0)

		# Overlap matrix between reference states (rows) and actual states (columns)
		overlap = np.abs(eivec0.transpose().conjugate() @ eivec)**2

		maxoverlap = np.max(overlap, axis=0)
		maxoverlapat = np.argmax(overlap, axis=0)
		if "verbose" in sys.argv:
			print("Overlap:", overlap)
			print("Max:", maxoverlap, "at", maxoverlapat)
			print("B =", b0, 0.0)
			for i in range(0, neig):
				print(i, eival[i], end=' ')
				print(maxoverlapat[i], eival0[maxoverlapat[i]], end=' ')
				print(bandtypes0[maxoverlapat[i]], maxoverlap[i])
		# Confidence thresholds: >= 0.9 certain, >= 0.5 tentative ('?'), else unknown
		bandtypes = [bandtypes0[maxoverlapat[i]] if maxoverlap[i] >= 0.9 else bandtypes0[maxoverlapat[i]] + '?' if maxoverlap[i] >= 0.5 else '??' for i in range(0, neig)]
		sys.stderr.write("Warning (diagonalization.hz_k0): Band types are estimated, because they cannot be calculated exactly for B != 0 or with BIA.\n")
		print("Confidence level (= minimum overlap):", min(maxoverlap))

	ddp = DiagDataPoint((kx, ky), eival, eivec)
	if not return_eivec:
		ddp.delete_eivec()
	ddp.char = bandtypes
	return ddp
+
+### Symbolic 2D + magn routines
def hsym(
	k_b, h_sym, params, energy = 0.0, neig = 50, obs = None, pot = None,
	orbital_magn = True, obs_prop = None, solver = None, **ignored_opts):
	"""Diagonalization for 2D geometry for one value of momentum and magnetic field, version for symbolic Hamiltonians.

	Arguments:
	k_b            Vector or float, or 2-tuple of those. Momentum, or momentum
	               and magnetic field.
	h_sym          SymbolicHamiltonian instance. The Hamiltonian.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, keep the
	               eigenvectors; otherwise the eigenvectors are discarded after
	               the observables have been calculated.
	pot            Array. Potential V(z) in meV as function of position.
	orbital_magn   True or False. If True, take the magnetic field from k_b
	               (if it is a 2-tuple). If False, use zero magnetic field.
	               Passing any other value together with a 2-tuple k_b raises
	               a ValueError (duplicate field input).
	obs_prop       ObservableList instance containing all observable properties.
	solver		   DiagSolver instance

	Returns:
	A DiagDataPoint instance.
	"""
	if orbital_magn is False:
		k = k_b[0] if isinstance(k_b, tuple) else k_b
		magn = Vector(0.0, astype = "z")
	elif orbital_magn is True:
		k = k_b[0] if isinstance(k_b, tuple) else k_b
		magn = k_b[1] if isinstance(k_b, tuple) else Vector(0.0, astype = "z")
	elif isinstance(k_b, tuple):  # orbital_magn is something else
		raise ValueError("Duplicate input for magnetic field. Set orbital_magn = True, not to a number.")
	else:
		# NOTE(review): a numeric orbital_magn is silently ignored here and the
		# field is set to zero; possibly magn = orbital_magn was intended — confirm.
		k, magn = k_b, Vector(0.0, astype = "z")
	if isinstance(k, Vector):
		kx, ky = k.xy()
	else:
		raise TypeError("Argument k must be a Vector instance")
	# Bug fix: a stray 'magn = 0.0' here clobbered the field extracted above,
	# which forced eB = 0 and paramval = 0 regardless of the input.
	eB = eoverhbar * magn.z() if isinstance(magn, Vector) else eoverhbar * magn

	t0 = rtime()
	show_job_monitor("%s  C start" % job_monitor_k_b(k, magn))
	# Evaluate the symbolic Hamiltonian at momentum (kx, ky) and field eB
	ham = h_sym.evaluate((kx, ky), eB)
	if pot is not None:
		hpot = hm.hz_sparse_pot(params, pot)
		ham += hpot
	show_job_monitor("%s  C done (%f s)" % (job_monitor_k_b(k, magn), rtime() - t0))

	t0 = rtime()
	show_job_monitor("%s  D start" % job_monitor_k_b(k, magn))
	if solver is not None:
		eival, eivec = solver.solve(ham)
	else:
		eival, eivec = eigsh(ham, neig, sigma=energy)  # Fallback to eigsh, if no solver configured
	show_job_monitor("%s  D done (%f s)" % (job_monitor_k_b(k, magn), rtime() - t0))

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(k, eival, eivec, paramval = magn).calculate_observables(params, obs, obs_prop = obs_prop, magn = magn)
	if not (obs is None or obs == []):
		ddp.delete_eivec()
	return ddp
+
+
def hsym_k0(
	h_sym, params, energy = 0.0, neig = 50, lattice_reg = False, split = 0.0,
	splittype = 'auto', ignorestrain = False,
	pot = None, return_eivec = False, k0 = 0, orbital_magn = True,
	bia = False, solver = None, bandtype_warning_level = 1, **ignored_opts):
	"""Diagonalization for 2D geometry at zero momentum and magnetic field, version for symbolic Hamiltonian.
	Also calculate band characters. This data may help to determine the band
	indices at zero.

	Arguments:
	h_sym          SymbolicHamiltonian instance. The Hamiltonian.
	params         PhysParams instance.
	energy         Float or list of floats. Target energy (or energies) of the
	               shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	pot            Array. Potential V(z) in meV as function of position.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	k0             Vector, float, or None. Momentum value. If None, zero
	               momentum.
	orbital_magn   Vector, float, or False. Magnetic field value; if False,
	               zero field. Must not be True, because this function has no
	               other source for the field value.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	solver         DiagSolver instance
	bandtype_warning_level
	               0, 1, 2. Whether to show no, some, or all warnings from the
	               band_types function.

	Returns:
	A DiagDataPoint instance.
	"""
	if k0 is None or k0 == 0:
		k0 = Vector(0.0, 0.0, astype = 'xy')
		kx, ky = 0.0, 0.0
	elif isinstance(k0, Vector):
		kx, ky = k0.xy()
	else:
		raise TypeError("Argument k0 must be a Vector instance or None")
	if orbital_magn is True:
		raise ValueError("No value for magnetic field. Set orbital_magn to a number.")
	elif orbital_magn is False:
		magn = Vector(0.0, astype = "z")
	else:
		magn = orbital_magn
	eB = eoverhbar * magn.z() if isinstance(magn, Vector) else eoverhbar * magn

	# Construction phase: evaluate the symbolic Hamiltonian and add the potential
	t0 = rtime()
	show_job_monitor("%6.3f  C start" % 0)
	ham = h_sym.evaluate((kx, ky), eB)
	if pot is not None:
		hpot = hm.hz_sparse_pot(params, pot)
		ham += hpot
	show_job_monitor("%6.3f  C done (%f s)" % (0, rtime() - t0))

	# Diagonalization phase
	t0 = rtime()
	show_job_monitor("%6.3f  D start" % 0)
	if solver is not None:
		# Temporarily override the solver's number of eigenvalues
		neig_old = solver.neig
		solver.neig = neig
		eival, eivec = solver.solve(ham)
		solver.neig = neig_old
	elif isinstance(energy, list):  # Fallback to eigsh (multiple targetenergy), if no solver configured
		eival, eivec = eigsh(ham, neig, sigma=energy[0])
		temp_ddp = DiagDataPoint(0, eival, eivec)
		intervals = [intervaltools.from_eivals(eival, energy[0])]
		for e in energy[1:]:
			eival, eivec = eigsh(ham, neig, sigma=e)
			temp_ddp.extend_by(0, eival, eivec)
			intervals.append(intervaltools.from_eivals(eival, e))
		intervals = intervaltools.normalize(intervals)
		if len(intervals) > 1:
			sys.stderr.write("ERROR (diagonalization.hz_k0): Disconnected eigenvalue ranges: " + ", ".join(["[%.3f, %.3f]" % i for i in intervals]) + ".\n")
			exit(1)
		eival, eivec = temp_ddp.eival, temp_ddp.eivec
	else:  # Fallback to eigsh, if no solver configured
		eival, eivec = eigsh(ham, neig, sigma=energy)
	show_job_monitor("%6.3f  D done (%f s)" % (0, rtime() - t0))

	if magn == 0.0 and not bia:
		bandtypes = band_types(params, eivec, warning_level = bandtype_warning_level, k = k0)
	else:
		# Band characters cannot be determined exactly at nonzero field or with
		# BIA. Repeat the calculation without magnetic field (and without BIA)
		# and estimate the characters from eigenvector overlaps.
		t0 = rtime()  # Reset the timer; otherwise 'C done' below also counts the diagonalization above
		show_job_monitor("%6.3f  C start" % 0)
		if bia:
			modelopts_nobia = {'lattice_reg': lattice_reg, 'split': split, 'splittype': splittype, 'ignorestrain': ignorestrain, 'axial': True, 'bia': False}
			h_sym_nobia = SymbolicHamiltonian(hm.hz_sparse_split, (params,), kwds = modelopts_nobia, hmagn = True)
		else:
			h_sym_nobia = h_sym
		ham = h_sym_nobia.evaluate((kx, ky), 0.0)
		if pot is not None:
			hpot = hm.hz_sparse_pot(params, pot)
			ham += hpot
		show_job_monitor("%6.3f  C done (%f s)" % (0, rtime() - t0))

		t0 = rtime()
		show_job_monitor("%6.3f  D start" % 0)
		if solver is not None:
			eival0, eivec0 = solver.solve(ham)
		elif isinstance(energy, list):  # Fallback to eigsh (multiple targetenergy), if no solver configured
			eival0, eivec0 = eigsh(ham, neig + 5, sigma=energy[0])
			temp_ddp = DiagDataPoint(0, eival0, eivec0)
			for e in energy[1:]:
				eival0, eivec0 = eigsh(ham, neig + 5, sigma=e)
				temp_ddp.extend_by(0, eival0, eivec0)
			eival0, eivec0 = temp_ddp.eival, temp_ddp.eivec
		else:  # Fallback to eigsh (single targetenergy), if no solver configured
			eival0, eivec0 = eigsh(ham, neig + 5, sigma=energy)
		show_job_monitor("%6.3f  D done (%f s)" % (0, rtime() - t0))
		bandtypes0 = band_types(params, eivec0, warning_level = bandtype_warning_level, k = k0)

		# Overlap matrix |<psi0_i|psi_j>|^2 between zero-field and finite-field states
		overlap = np.abs(eivec0.transpose().conjugate() @ eivec)**2

		maxoverlap = np.max(overlap, axis=0)
		maxoverlapat = np.argmax(overlap, axis=0)
		if "verbose" in sys.argv:
			print("Overlap:", overlap)
			print("Max:", maxoverlap, "at", maxoverlapat)
			print("B =", magn, 0.0)
			for i in range(0, len(eival)):
				print(i, eival[i], end=' ')
				print(maxoverlapat[i], eival0[maxoverlapat[i]], end=' ')
				print(bandtypes0[maxoverlapat[i]], maxoverlap[i])
		# Label confidence: overlap >= 0.9 definite, >= 0.5 tentative ('?'), else unknown ('??')
		# NOTE(review): the comprehension runs over range(neig); with multiple
		# target energies, len(eival) may exceed neig — confirm intended.
		bandtypes = [bandtypes0[maxoverlapat[i]] if maxoverlap[i] >= 0.9 else bandtypes0[maxoverlapat[i]] + '?' if maxoverlap[i] >= 0.5 else '??' for i in range(0, neig)]
		sys.stderr.write("Warning (diagonalization.hz_k0): Band types are estimated, because they cannot be calculated exactly for B != 0 or with BIA.\n")
		print("Confidence level (= minimum overlap):", min(maxoverlap))

	ddp = DiagDataPoint((kx, ky), eival, eivec)
	if not return_eivec:
		ddp.delete_eivec()
	ddp.char = bandtypes
	return ddp
+
+### 1D DIAGONALIZATION ROUTINES ###
+
def hzy(
	k_b, params, energy = 0.0, neig = 50, periodicy = False, lattice_reg = False,
	split = 0.0, splittype = 'auto', ignorestrain = False,
	gauge_zero = 0.0, obs = None, axial = True, return_eivec = None,
	return_bandtypes = False, overlap_eivec = None, pot = None, poty = None,
	obs_prop = None, bia = False, ignore_magnxy = False, solver = None,
	bandtype_warning_level = 1):
	"""Diagonalization for 1D geometry for one value of momentum and magnetic field.

	Arguments:
	k_b            Vector or float, or 2-tuple of those. Momentum, or momentum
	               and magnetic field.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	periodicy      True or False. Whether the geometry in the transversal (y)
	               direction is periodic/cylindrical (True) or finite (False).
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	gauge_zero     Float. Shifts the gauge field by this amount. See
	               hamiltonian/full.py.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	return_bandtypes
	               True or False. If True, also calculate band characters and
	               store them in the returned DiagDataPoint.
	overlap_eivec  A dict, whose keys are the band labels (characters) and
	               values are the eigenvectors for which overlaps can be
	               calculated with the eigenvectors of this Hamiltonian.
	pot            Array, 1- or 2-dimensional. Potential V(z) (1-dim) or V(z, y)
	               (2-dim) in meV as function of position.
	poty           Array, 1-dimensional. Only if pot is also 1-dimensional or
	               None. Potential V(y) as function of position.
	obs_prop       ObservableList instance containing all observable properties.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	ignore_magnxy  True or False. If True, neglect the in-plane components of
	               the orbital part of the magnetic field. Only for legacy
	               reasons, e.g., comparing with results that were calculated
	               when these terms were not yet implemented.
	solver         DiagSolver instance
	bandtype_warning_level
	               0, 1, 2. Whether to show no, some, or all warnings from the
	               band_types function.

	Returns:
	A DiagDataPoint instance.
	"""
	# Unpack momentum and magnetic field; the field defaults to zero
	if isinstance(k_b, tuple):
		k, b = k_b
	else:
		k, b = k_b, 0.0
	if isinstance(k, Vector):
		kx, ky = k.xy()
	elif isinstance(k, (float, np.floating, int, np.integer)):
		k = Vector(k, astype = 'x')
		kx, ky = float(k), 0.0
	else:
		raise TypeError("Argument k must be a Vector or a float instance")
	if abs(ky) > 1e-6:
		# NOTE(review): this only reports the error; execution continues and the
		# Hamiltonian below is built with ky = 0.0 regardless. Confirm whether
		# raising an exception is intended here.
		sys.stderr.write("ERROR (diagonalization.hzy): y component of the momentum must be zero\n")

	# Construction phase: a separate constructor is used at zero magnetic field
	t0 = rtime()
	show_job_monitor("%s  C start" % job_monitor_k_b(k, b))
	kterms = hm.h_kterms(params, axial = axial) if params.lattice_transformed() else None
	if b == 0.0:
		ham = hm.hzy_sparse(
			kx, 0.0, params, periodicy = periodicy, solver = solver,
			lattice_reg = lattice_reg, ignorestrain = ignorestrain,
			axial = axial, bia = bia, kterms = kterms
		)
	else:
		ham = hm.hzy_sparse_magn(
			kx, b, params, periodicy = periodicy, solver = solver,
			lattice_reg = lattice_reg, ignorestrain = ignorestrain,
			gauge_zero = gauge_zero, axial = axial, bia = bia,
			ignore_magnxy = ignore_magnxy, kterms = kterms
		)
	# Degeneracy-lifting term
	if split != 0.0:
		hamsplit = split * hm.hsplit_full(params, splittype, k = [kx], bia = bia, lattice_reg = lattice_reg)
		ham += hamsplit

	# Add the potentials V(z) (or V(z, y)) and V(y), if given
	if pot is not None:
		ham += hm.h_pot_1d(pot, params)
	if poty is not None:
		ham += hm.h_pot_1d(poty, params, axis = 'y')

	show_job_monitor("%s  C done (%f s) (nnz = %i, dim = %i, size (bytes) = %i (data) + %i (ind.pointer) + %i (indices) = %i)" %
	                 (job_monitor_k_b(k, b), rtime() - t0, ham.nnz, ham.shape[0], ham.data.nbytes, ham.indptr.nbytes, ham.indices.nbytes, ham.data.nbytes + ham.indptr.nbytes + ham.indices.nbytes))

	# Diagonalization phase
	t0 = rtime()
	show_job_monitor("%s  D start" % job_monitor_k_b(k, b))
	if solver is not None:
		eival, eivec = solver.solve(ham)
	else:
		eival, eivec = eigsh(ham, neig, sigma=energy)  # Fallback to eigsh, if no solver configured
	show_job_monitor("%s  D done (%f s)" % (job_monitor_k_b(k, b), rtime() - t0))
	# NOTE:
	# For very large matrices, the shift-and-invert algorithm provided by eigsh
	# (which invokes SuperLU) fails with a MemoryError when the matrix occupies
	# more than about 60 GB.  Possibly, this error may be avoided by providing
	# an explicit inverse.  See, for example:
	# https://github.com/scipy/scipy/issues/4170
	# TODO: Investigate this.

	# return eigenvectors, observables, and band types depending on the option values
	# NOTE: We consider return_eivec = None as a valid value, meaning that we determine
	# automatically whether eigenvectors should be returned. Hence the explicit
	# 'return_eivec == True' and 'return_eivec == False'.
	t0 = rtime()
	show_job_monitor("%6.3f  O start" % kx)
	ddp = DiagDataPoint(k, eival, eivec, paramval = b).calculate_observables(params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec, magn = b)
	show_job_monitor("%6.3f  O done (%f s)" % (kx, rtime() - t0))
	if return_bandtypes:
		bandtypes = band_types(params, eivec, warning_level = bandtype_warning_level, k = k, b = b)
		ddp.char = bandtypes
	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
+
+### LANDAU LEVEL DIAGONALIZATION ROUTINES ###
+
def hz_ll(
	idx, magn, ll_max, params, energy = 0.0, neig = 50, lattice_reg = False,
	split = 0.0, splittype = 'auto', ignorestrain = False, obs = None, pot = None, axial = True,
	return_eivec = False, overlap_eivec = None, obs_prop = None,
	solver = None, **ignored_opts):
	"""Diagonalization for Landau-level Hamiltonian for one value of magnetic field.

	Arguments:
	idx            Integer. Index in the grid (of magnetic field values).
	magn           Vector or float. Magnetic field.
	ll_max         Integer. Maximum LL index.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	pot            Array. Potential V(z) in meV as function of position.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	overlap_eivec  A dict, whose keys are the band labels (characters) and
	               values are the eigenvectors for which overlaps can be
	               calculated with the eigenvectors of this Hamiltonian.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	berry          NOT USED. Accepted (and ignored) via **ignored_opts for
	               interface compatibility with the other routines.
	obs_prop       ObservableList instance containing all observable properties.
	solver         DiagSolver instance

	Returns:
	A DiagDataPoint instance.
	"""
	# Only a perpendicular field (z component) is supported in LL mode
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
		if abs(bz) > 1e-10 and (abs(bx) > 1e-6 or abs(by) > 1e-6):
			sys.stderr.write("ERROR (diagonalization.hz_ll): In-plane field components in combination with LLs not (yet) implemented.\n")
			raise ValueError("Bx, By with Bz not yet implemented")
		magn = Vector(bz, astype = 'z')
	else:
		bz = magn

	t0 = rtime()
	show_job_monitor("B=%s  LL start" % magn)

	# Diagonalize each Landau level separately and gather the results
	eival = []
	eivec = []
	ll_n = []
	for n in range(-2, ll_max + 1):
		ham = hm.hz_sparse_ll(magn, n, params, lattice_reg = lattice_reg, ignorestrain = ignorestrain, axial = axial)
		if split != 0.0:
			hamsplit = split * hm.hsplit_full(params, splittype, k = None, bia = False, lattice_reg = lattice_reg)
			whichbands = whichbands_ll(n, params.norbitals, bz)  # orbitals in this LL
			sel = np.add.outer(np.arange(0, params.nz) * params.norbitals, whichbands).flatten()  # expand indices over z degree of freedom
			ham += hamsplit[sel, :][:, sel]

		if pot is not None:
			# Number of orbital components present in LL index n; presumably
			# consistent with the counts encoded by whichbands_ll — TODO confirm
			if params.norbitals == 8:
				nbands = 1 if n == -2 else 4 if n == -1 else 7 if n == 0 else 8
			else:
				nbands = 1 if n == -2 else 3 if n == -1 else 5 if n == 0 else 6
			hpot = hm.hz_sparse_pot(params, pot, norb = nbands)
			ham += hpot

		if solver is not None:
			eival1, eivec1 = solver.solve(ham)
		else:
			eival1, eivec1 = eigsh(ham, neig, sigma=energy)  # Fallback to eigsh, if no solver configured

		eival.extend(eival1)
		eivec.extend(scaleup_eivec(eivec1, params, len(eival1), n, bz))
		ll_n.extend(np.full(len(eival1), n))

	show_job_monitor("B=%s  LL done (%f s)" % (magn, rtime() - t0))

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(0.0, np.array(eival), np.array(eivec), paramval = magn)
	ddp.llindex = np.array(ll_n)
	ddp.calculate_observables(params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec, magn = magn)
	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
def hsym_ll(
	idx, magn, ll_max, h_sym, params, energy = 0.0, neig = 50, obs = None,
	pot = None, bia = False, return_eivec = False,
	overlap_eivec = None, berry = False,
	transitions = False, transitions_range = None, obs_prop = None,
	wflocations = None, solver = None, **ignored_opts):
	"""Diagonalization for Landau-level Hamiltonian for one value of magnetic field, version for symbolic Hamiltonian

	Arguments:
	idx            Integer. Index in the grid (of magnetic field values).
	magn           Vector or float. Magnetic field.
	ll_max         Integer. Maximum LL index.
	h_sym          SymbolicHamiltonian instance. The Hamiltonian.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	pot            Array. Potential V(z) in meV as function of position.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	               Raises an error: BIA requires 'full' LL mode.
	overlap_eivec  A dict, whose keys are the band labels (characters) and
	               values are the eigenvectors for which overlaps can be
	               calculated with the eigenvectors of this Hamiltonian.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	berry          2-tuple, True or False. If a 2-tuple of integers, calculate
	               Berry curvature for bands with indices in this range. If
	               True, calculate Berry curvature for all states. If False, do
	               not calculate Berry curvature.
	transitions    True or False, or float. If True or a float, calculate
	               optical transitions, where a float indicates the minimum
	               transition amplitude, below which the transitions are
	               discarded. If False, do not calculate transitions.
	transitions_range  2-tuple or None. If set, calculate optical transitions
	                   only for states in that energy range. If None, do not
	                   restrict to an energy range.
	obs_prop       ObservableList instance containing all observable properties.
	wflocations    List, array, or VectorGrid instance. Contains the magnetic
	               field values where wave functions should be saved (plot and
	               table). None if no wave functions should be saved.
	solver         DiagSolver instance

	Returns:
	A DiagDataPoint instance.
	"""
	# Only a perpendicular field (z component) is supported in this LL mode
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
		if abs(bz) > 1e-10 and (abs(bx) > 1e-6 or abs(by) > 1e-6):
			sys.stderr.write("ERROR (diagonalization.hsym_ll): In-plane field components in combination with LLs can only be calculated in 'full' LL mode.\n")
			raise ValueError("Bx, By with Bz not implemented. Use 'full' LL mode.")
		magn = bz
	if bia:
		sys.stderr.write("ERROR (diagonalization.hsym_ll): BIA in combination with LLs can only be calculated in 'full' LL mode.\n")
		raise ValueError("BIA not implemented. Use 'full' LL mode.")

	t0 = rtime()
	show_job_monitor("B=%s  LL start" % (magn_monitor(magn)))

	# Diagonalize each Landau level separately and gather the results
	eival = []
	eivec = []
	ll_n = []
	magnz = magn.z() if isinstance(magn, Vector) else magn
	delta_n_vec = delta_n_ll(params.norbitals, magnz)
	for n in range(-2, ll_max + 1):
		ham = h_sym.ll_evaluate(n, magn, delta_n_vec)
		if pot is not None:
			# Count the orbital components present in LL index n (those with
			# nonnegative shifted LL index); used as 'norb' for the potential
			nbands = np.count_nonzero(delta_n_vec + n >= 0)
			hpot = hm.hz_sparse_pot(params, pot, norb = nbands)
			ham += hpot

		if solver is not None:
			eival1, eivec1 = solver.solve(ham)
		else:
			eival1, eivec1 = eigsh(ham, neig, sigma=energy)  # Fallback to eigsh, if no solver configured

		eival.extend(eival1)
		eivec.extend(scaleup_eivec(eivec1, params, len(eival1), n, magn))
		ll_n.extend(np.full(len(eival1), n))

	show_job_monitor("B=%s  LL done (%f s)" % (magn_monitor(magn), rtime() - t0))

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(0.0, np.array(eival), np.array(eivec), paramval = magn)
	ddp.llindex = np.array(ll_n)
	ddp.calculate_observables(params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec, magn = magn)
	if berry:
		which = berry if isinstance(berry, tuple) else None
		if magn == 0.0:
			# At B = 0, store trivial values instead of computing Chern numbers
			ddp.set_observable_value('chern', None, 0.0)
			ddp.set_observable_value('chernsim', None, 0.0)
		else:
			ch_val, ch_ei, ch_ll = chernnumber_ll(ddp, magn, h_sym, which = which, norb = params.norbitals)
			ddp.set_observable_value('chern', ch_ei, np.asarray(ch_val))
			ddp.set_observable_value('chernsim', None, 1.0)
	if transitions:
		ampmin = transitions if isinstance(transitions, (float, np.floating)) else None
		td = get_transitions(ddp, magn, h_sym, which = transitions_range, ampmin = ampmin, norb = params.norbitals)
		td.sort(in_place = True)
		ddp.transitions = td
	# Force keeping the eigenvectors if wave functions are requested at this field value
	if isinstance(wflocations, (list, np.ndarray, VectorGrid)):
		wfmagn = magn if isinstance(magn, Vector) else Vector(magn, astype = 'z')
		for j, wfloc in enumerate(wflocations):
			# NOTE(review): unlike the scalar branch below, this comparison does
			# not take an absolute value — confirm that the Vector difference
			# cannot be negative and cause a spurious match.
			if isinstance(wfloc, Vector) and wfloc - wfmagn < 1e-9:
				return_eivec = True
				break
			elif isinstance(wfloc, (int, float, np.integer, np.floating)) and np.abs(wfloc - magnz) < 1e-9:  # magnz is numerical value
				return_eivec = True
				break

	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
def hsym_ll_full(
	idx, magn, ll_max, h_sym, params, energy = 0.0, neig = 50, obs = None,
	pot = None, return_eivec = False, overlap_eivec = None, berry = False,
	transitions = False, transitions_range = None, obs_prop = None,
	wflocations = None, solver = None, h_sym_opts = None, **ignored_opts):
	"""Diagonalization for Landau-level Hamiltonian for one value of magnetic field, version for symbolic Hamiltonian for full LL mode

	Arguments:
	idx            Integer. Index in the grid (of magnetic field values).
	magn           Vector or float. Magnetic field.
	ll_max         Integer. Maximum LL index.
	h_sym          SymbolicHamiltonian instance or None. The Hamiltonian. If
	               None, it is constructed here from h_sym_opts.
	params         PhysParams instance.
	energy         Float. Target energy of the shift-and-invert algorithm.
	neig           Integer. Number of eigenvalues.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	pot            Array. Potential V(z) in meV as function of position.
	overlap_eivec  A dict, whose keys are the band labels (characters) and
	               values are the eigenvectors for which overlaps can be
	               calculated with the eigenvectors of this Hamiltonian.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.
	berry          2-tuple, True or False. If a 2-tuple of integers, calculate
	               Berry curvature for bands with indices in this range. If
	               True, calculate Berry curvature for all states. If False, do
	               not calculate Berry curvature.
	transitions    True or False, or float. If True or a float, calculate
	               optical transitions, where a float indicates the minimum
	               transition amplitude, below which the transitions are
	               discarded. If False, do not calculate transitions.
	transitions_range  2-tuple or None. If set, calculate optical transitions
	                   only for states in that energy range. If None, do not
	                   restrict to an energy range.
	obs_prop       ObservableList instance containing all observable properties.
	wflocations    List, array, or VectorGrid instance. Contains the magnetic
	               field values where wave functions should be saved (plot and
	               table). None if no wave functions should be saved.
	solver         DiagSolver instance
	h_sym_opts     Modelopts dict for per-DDP construction of symbolic Hamiltonian.
	               Only required if no constant h_sym can be given.

	Returns:
	A DiagDataPoint instance.
	"""
	# Construction phase
	t0 = rtime()
	show_job_monitor("B=%s  C start" % magn_monitor(magn))
	if h_sym is None:
		# Calculate a symbolic Hamiltonian, if required, but not given. May be the case if variable in-plane
		# magnetic fields are present and no single symbolic Hamiltonian can be defined.
		h_sym = SymbolicHamiltonian(hm.hz_sparse_split, (params,), h_sym_opts, hmagn = False, b0 = magn)
	ham = hm.hz_sparse_ll_full(h_sym, ll_max, magn, params.norbitals)
	# Lift LL degeneracy for very small magnetic fields
	if abs(magn) < 1e-6:
		hllsplit = 1e-8 * hm.hsplit_ll_full(ll_max, nz = params.nz, norb = params.norbitals)
		ham += hllsplit

	if pot is not None:
		hpot = hm.hz_sparse_pot_ll_full(params, ll_max, pot, norb = params.norbitals)
		ham += hpot
	show_job_monitor("B=%s  C done (%f s)" % (magn_monitor(magn), rtime() - t0))

	# Diagonalization phase
	t0 = rtime()
	show_job_monitor("B=%s  D start" % magn_monitor(magn))
	if solver is not None:
		eival, eivec1 = solver.solve(ham)
	else:
		eival, eivec1 = eigsh(ham, neig, sigma=energy)  # Fallback to eigsh, if no solver configured
	# Correct for degeneracy lifting: subtract the expectation value of the
	# splitting term from each eigenvalue
	if abs(magn) < 1e-6:
		print("Degeneracy between Landau levels was lifted at B = %s" % magn)
		delta_eival = np.real(np.array([np.vdot(eivec1[:, j], hllsplit.dot(eivec1[:, j])) for j in range(0, len(eival))]))
		eival -= delta_eival
	magnz = magn.z() if isinstance(magn, Vector) else magn
	eivec = scaleup_full_eivec(eivec1, params, len(eival), ll_max, magnz)
	del eivec1  # free the unscaled eigenvectors
	show_job_monitor("B=%s  D done (%f s)" % (magn_monitor(magn), rtime() - t0))

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(0.0, eival, eivec, paramval = magn)

	# TODO: LL indices
	# ddp.llindex = ll_n
	# NOTE(review): this mutates the shared params instance — confirm that
	# callers do not rely on the previous value of params.ny.
	params.ny = ll_max + 3
	ddp.calculate_observables(params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec, magn = magn)

	if berry:
		which = berry if isinstance(berry, tuple) else None
		if magn == 0.0:
			# At B = 0, store trivial values instead of computing Chern numbers
			ddp.set_observable_value('chern', None, 0.0)
			ddp.set_observable_value('chernsim', None, 0.0)
		else:
			ch_val, ch_ei, ch_ll = chernnumber_ll_full(ddp, magn, h_sym, ll_max, which = which, norb = params.norbitals)
			ddp.set_observable_value('chern', ch_ei, np.asarray(ch_val))
			ddp.set_observable_value('chernsim', None, 1.0)
	if transitions:
		ampmin = transitions if isinstance(transitions, (float, np.floating)) else None
		t0 = rtime()
		show_job_monitor("B=%s  T start" % magn_monitor(magn))
		td = get_transitions_full(ddp, magn, h_sym, which = transitions_range, ampmin = ampmin, norb = params.norbitals, nll = ll_max + 3)
		td.sort(in_place = True, llsort = False)
		show_job_monitor("B=%s  T done (%f s)" % (magn_monitor(magn), rtime() - t0))
		ddp.transitions = td
	# Force keeping the eigenvectors if wave functions are requested at this field value
	if isinstance(wflocations, (list, np.ndarray, VectorGrid)):
		wfmagn = magn if isinstance(magn, Vector) else Vector(magn, astype = 'z')
		for j, wfloc in enumerate(wflocations):
			# NOTE(review): no abs() in the Vector branch, unlike the scalar
			# branch below — confirm this cannot produce spurious matches.
			if isinstance(wfloc, Vector) and wfloc - wfmagn < 1e-9:
				return_eivec = True
				break
			elif isinstance(wfloc, (int, float, np.integer, np.floating)) and np.abs(wfloc - magnz) < 1e-9:  # magnz is numerical value
				return_eivec = True
				break

	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
+
+## BULK DIAGONALIZATION ROUTINES
+
def hbulk(
		k_b, params, lattice_reg = False, split = 0.0, splittype = 'auto',
		ignorestrain = False, obs = None, axial = True, bia = False, berry = False,
		verbose = False, obs_prop = None, return_eivec = False, **ignored_opts):
	"""Diagonalization for bulk geometry for one value of momentum and magnetic field.

	Arguments:
	k_b            Vector or float, or 2-tuple of those. Momentum, or momentum
	               and magnetic field.
	params         PhysParams instance.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	bia            True or False. If True, include BIA terms in the Hamiltonian.
	berry          2-tuple, True or False. If a 2-tuple of integers, calculate
	               Berry curvature for bands with indices in this range. If
	               True, calculate Berry curvature for all states. If False, do
	               not calculate Berry curvature.
	verbose        True or False. If True, print extra diagnostic information to
	               stdout.
	obs_prop       ObservableList instance containing all observable properties.
	return_eivec   True, False or None. If True, keep eigenvector data in the
	               return value (DiagDataPoint instance). If False, discard
	               them. If None, discard them only if observables have been
	               calculated.

	Returns:
	A DiagDataPoint instance.
	"""
	# Unpack momentum and magnetic field; the field defaults to zero
	if isinstance(k_b, tuple):
		k, b = k_b
	else:
		k, b = k_b, 0.0
	if isinstance(k, Vector):
		kx, ky, kz = k.xyz()
	else:
		raise TypeError("Argument k must be a Vector instance")
	# Construction phase
	t0 = rtime()
	if verbose:
		show_job_monitor("%s  C start" % job_monitor_k_b(k, b))
	kterms = hm.h_kterms(params, axial = axial) if params.lattice_transformed() else None
	ham = hm.hbulk([kx, ky, kz], b, params, lattice_reg = lattice_reg, ignorestrain = ignorestrain, axial = axial, bia = bia, kterms = kterms)
	if split != 0.0:
		hamsplit = split * hm.hsplit_full(params, splittype, k = [kx, ky, kz], bia = bia, lattice_reg = lattice_reg)
		ham += hamsplit
	if verbose:
		show_job_monitor("%s  C done (%f s)" % (job_monitor_k_b(k, b), rtime() - t0))

	# Diagonalization phase: dense (full) diagonalization via eigh
	t0 = rtime()
	if verbose:
		show_job_monitor("%s  D start" % job_monitor_k_b(k, b))
	eival, eivec = eigh(ham)
	if verbose:
		show_job_monitor("%s  D done (%f s)" % (job_monitor_k_b(k, b), rtime() - t0))
	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint(k, eival, eivec, paramval = b).calculate_observables(params, obs, obs_prop = obs_prop, magn = b)
	if berry:
		berry_dk = get_config_num('berry_dk', minval = 0)
		if berry_dk == 0:
			sys.stderr.write("ERROR (diagonalization.hbulk): Berry curvature momentum step must be a positive number.\n")
			# Raise with a message so the failure is identifiable from the traceback
			raise ValueError("Berry curvature momentum step ('berry_dk') must be a positive number")
		which = berry if isinstance(berry, tuple) else None
		bc_val, bc_ei, _ = berrycurv_k(ddp, hm.hbulk, params, dk = berry_dk, which = which, lattice_reg = lattice_reg, split = split, ignorestrain = ignorestrain, axial = axial, dim = 3)
		for bc_i, o in zip(bc_val, ["berryx", "berryy", "berryz"]):
			ddp.set_observable_value(o, bc_ei, np.asarray(bc_i))
	if not return_eivec:
		ddp.delete_eivec()
	return ddp
+
+
def hbulk_ll(
		idx, k_b, ll_max, params, lattice_reg = False, split = 0.0,
		splittype = 'auto', ignorestrain = False, obs = None, axial = True,
		verbose = False, obs_prop = None, **ignored_opts):
	"""Diagonalization for Landau-level Hamiltonian in bulk geometry for one value of momentum and magnetic field.

	Arguments:
	idx            Integer. Index in the grid (of magnetic field values).
	k_b            Vector or float, or 2-tuple of those. Momentum, or momentum
	               and magnetic field.
	ll_max         Integer. Maximum LL index.
	params         PhysParams instance.
	lattice_reg    True or False. Whether to apply lattice regularization
	               (x -> sin x).
	split          Float. Amount of degeneracy lifting.
	splittype      String. Type of degeneracy lifting.
	ignorestrain   True or False. If True, do not include strain terms in the
	               Hamiltonian.
	obs            List of strings or None. Observable ids of the observables
	               that will be calculated. If None or empty list, do not do
	               anything.
	axial          True or False. If True, apply axial approximation. If False,
	               include non-axial terms in the Hamiltonian.
	obs_prop       ObservableList instance containing all observable properties.
	bia            NOT USED. BIA terms are always disabled in this function.
	verbose        True or False. If True, print extra diagnostic information to
	               stdout.

	Returns:
	A DiagDataPoint instance.
	"""
	# Unpack momentum and magnetic field; the field defaults to zero
	if isinstance(k_b, tuple):
		k, b = k_b
	else:
		k, b = k_b, 0.0
	if isinstance(k, Vector):
		kx, ky, kz = k.xyz()
	else:
		raise TypeError("Argument k must be a Vector instance")

	# Only a perpendicular field (z component) is supported in LL mode
	if isinstance(b, Vector):
		bx, by, bz = b.xyz()
		if abs(bz) > 1e-10 and (abs(bx) > 1e-6 or abs(by) > 1e-6):
			sys.stderr.write("ERROR (diagonalization.hbulk_ll): In-plane field components in combination with LLs not (yet) implemented.\n")
			raise ValueError("Bx, By with Bz not yet implemented")
		b = Vector(bz, astype = 'z')
	else:
		bz = b

	t0 = rtime()
	if verbose:
		show_job_monitor("B=%s  LL start" % magn_monitor(b))

	# Diagonalize each Landau level separately (dense eigh; only kz enters the
	# Hamiltonian, the in-plane momentum is set to zero)
	eival = []
	eivec = []
	ll_n = []
	for n in range(-2, ll_max + 1):
		ham = hm.hbulk_ll((0.0, 0.0, kz), b, n, params, lattice_reg = lattice_reg, ignorestrain = ignorestrain, axial = axial, bia = False)
		if split != 0.0:
			hamsplit = split * hm.hsplit_full(params, splittype, k = None, kdim = 2, bia = False, lattice_reg = lattice_reg)
			whichbands = whichbands_ll(n, params.norbitals, bz)  # orbitals in this LL
			ham += hamsplit[whichbands, :][:, whichbands]

		eival1, eivec1 = eigh(ham)
		eival.append(eival1)
		eivec.append(scaleup_eivec(eivec1, params, len(eival1), n, bz))
		ll_n.extend([n] * len(eival1))

	ll_n = np.asarray(ll_n)
	if verbose:
		show_job_monitor("B=%s  LL done (%f s)" % (magn_monitor(b), rtime() - t0))
	# Concatenate the per-LL results. Do not wrap the lists in np.array() first:
	# the per-LL blocks may have different lengths, which makes np.array() fail
	# on ragged input (NumPy >= 1.24); np.concatenate handles them directly.
	eival = np.concatenate(eival)
	eivec = np.concatenate(eivec)

	# either return eigenvectors, or observables only, if they are specified
	ddp = DiagDataPoint((0.0, 0.0, kz), eival, eivec, paramval = b)
	ddp.llindex = ll_n
	ddp.calculate_observables(params, obs, obs_prop = obs_prop, magn = b)
	if not (obs is None or obs == []):
		ddp.delete_eivec()
	return ddp
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagsolver.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagsolver.py
new file mode 100644
index 0000000000000000000000000000000000000000..4910ed783110f41ef08707c8ae90e47d4c5a7cc7
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/diagsolver.py
@@ -0,0 +1,783 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from sys import stderr
+from os import environ
+from time import sleep, perf_counter as rtime  # high precision timing
+
+from ..config import get_config, get_config_num, get_config_num_auto
+from .. import cmdargs
+from .. import intervaltools
+
+from .diagdata import DiagDataPoint, NoOverlapError, SubsetError
+
+# Need to set thread number for scipy / umfpack / pyMKL before importing. For Intel MKL / FEAST it's ok to reset this later.
+import_num_threads = cmdargs.threads()  # may be None if not given on the command line
+environ['CUPY_CUDA_PER_THREAD_DEFAULT_STREAM'] = '1'  # Use a separate CUDA stream for each CPU thread (NOT process!)
+# Fall back to a single thread if no thread count was requested.
+environ['OMP_NUM_THREADS'] = '%d' % import_num_threads if import_num_threads is not None else '1'
+environ['MKL_NUM_THREADS'] = '%d' % import_num_threads if import_num_threads is not None else '1'
+environ['KMP_WARNINGS'] = 'False'  # suppress deprecation warnings from MKL via pyMKL
+
+environ["JAX_ENABLE_X64"] = "True"  # If JAX is used, make sure it runs with double precision
+
+# Numpy/Scipy brings the default math, sparse matrix tools and solvers
+# UMFPACK is loaded automatically for LU solve, if installed.
+import numpy as np
+from scipy.linalg import eigh
+from scipy.sparse import eye, issparse
+from scipy.sparse.linalg import eigsh, factorized, splu
+from scipy.sparse.linalg.interface import LinearOperator
+
+# Optional packages for additional solver configuration. Only loaded if
+# installed in environment. Since they are optional, any import errors are
+# suppressed.
+
+# CUPY package for Nvidia CUDA support.
+# Best performance with large Hamiltonians and GPUs featuring TensorCores.
+try:
+    import cupy
+    from cupyx.scipy.sparse import csr_matrix as cupy_csr_matrix
+    from cupyx.scipy.sparse.linalg import eigsh as cupy_eigsh, factorized as cupy_factorized
+    from cupyx.scipy.sparse.linalg._interface import LinearOperator as CupyLinOp
+    HAS_CUPY = True
+except:
+    HAS_CUPY = False
+
+try:
+    import jax.numpy as jnp
+    import jax
+    HAS_JAX = True
+except Exception:
+    HAS_JAX = False
+
+# Interface to Intel MKL PARDISO solver (for LU decomposition - shift-invert solve)
+try:
+    # Unfortunately, pyMKL is no longer maintained (as of v0.0.3)
+    # See PR #16 for added single precision and new Intel OneAPI support.
+    # Installation of an suitably updated version is possible via:
+    # 'pip install git+git://github.com/fjbay/pyMKL@patch-1#egg=pyMKL'
+    # Alternative: pypardiso, but has more overhead per call and does not support complex matrices.
+    # When running on AMD CPUs, pay special attention to the MKL version
+    # you are using (see e.g. wiki tutorials/eigensolver optimization).
+    import pyMKL
+    HAS_PARDISO = True
+except:
+    HAS_PARDISO = False
+
+# Interface to FEAST solver libraries (either custom compilation of v4 or v2 via
+# Intel MKL). This algorithm does not require a shift-invert strategy, but finds
+# all eigenstates in a given energy interval, making it more efficient in
+# theory. Practically, with typical eigenvalue problems for kdotpy, it offers
+# worse performance due to dense eigenstate clustering at the cut-off energies.
+try:
+    from . import feastsolver as fs  # this prepares the solver for execution
+    HAS_FEAST = True
+except:
+    HAS_FEAST = False
+
+# global variables: issue warnings enabled
+feast_not_found_warning = True
+no_pardiso_warning = True
+no_cupy_warning = True
+cupy_warning_2d = True
+no_jax_warning = True
+
+
+class FastSpLuInv(LinearOperator):
+    """
+    SpLuInv:
+       helper class to repeatedly solve M*x = rhs
+       using a sparse LU-decomposition of mat M
+    """
+
+    def __init__(self, mat, solver='umfpack'):
+        # Factorize mat once up front; the factorization is then reused for
+        # every _matvec call (one call per ARPACK iteration).
+        # mat:    sparse matrix to factorize (typically the shifted Hamiltonian).
+        # solver: 'umfpack', 'superlu' or 'pardiso'; raises ValueError otherwise.
+        t0 = rtime()
+        super().__init__(mat.dtype, mat.shape)
+        if solver == 'umfpack':
+            self.M_lu_solve = factorized(mat)  # Falls back to SuperLU silently if Scikit.umfpack is not available
+        elif solver == 'superlu':
+            # Chooses SuperLU directly. Equals default eigs(h) behaviour,
+            # but enables us to measure timing statistics.
+            self.M_lu_solve = splu(mat).solve
+        elif solver == 'pardiso':
+            # mtype=-4 is the PARDISO matrix-type code used here for the
+            # complex Hamiltonian; requires the pyMKL package (HAS_PARDISO).
+            pardiso_invert = pyMKL.pardisoSolver(mat, mtype=-4)
+            pardiso_invert.factor()
+            self.M_lu_solve = pardiso_invert.solve
+            self.solver_handle = pardiso_invert
+        else:
+            raise ValueError('Solver %s not supported' % solver)
+        self.solver = solver
+        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
+        # Counters/accumulators for the timing statistics reported in __del__.
+        self.n_mult = 0
+        self.pref_time = rtime()
+        self.total_t_arpack = 0
+        self.total_t_lu_solve = 0
+        self.verbose = "verbose" in cmdargs.sysargv
+        if self.verbose:
+            stderr.write("Initialized and factorized %s LU solver in %.3g s.\n" % (solver, rtime()-t0))
+
+    def __del__(self):
+        # Release the PARDISO handle (if any) and report timing statistics.
+        try:
+            if self.solver == 'pardiso':
+                self.solver_handle.clear()  # Clear memory of pardiso instance
+            if self.verbose:
+                stderr.write('Total ARPACK %.3g s (%.3g %%); Total LU-Inv solve %.3g s. Used %d MatVec operations.\n'
+                             % (self.total_t_arpack, 100 * self.total_t_arpack / (self.total_t_lu_solve + self.total_t_arpack),
+                                self.total_t_lu_solve, self.n_mult))
+        except AttributeError:
+            # In some rare cases (e.g. memory allocation errors), this instance
+            # will not have all (valid) attributes. This would throw additional
+            # distracting secondary error messages, therefore, we suppress those
+            # errors completely. The verbose output might also be suppressed, but
+            # would not be of much use in this case anyway.
+            pass
+
+    def _matvec(self, rhs):
+        """Custom definition of matrix-vector product M^-1 * rhs, where
+        the matrix M^-1 would be the virtual inverse of the original M (not actually calculated)
+        and the Vector rhs (usually an eigenvector candidate).
+        """
+        rhs = np.asarray(rhs)
+        t0 = rtime()
+
+        # careful here: splu.solve will throw away imaginary
+        # part of rhs if M is real (this part is taken from the original scipy definition)
+        if self.solver != 'pardiso' and self.isreal and np.issubdtype(rhs.dtype, np.complexfloating):
+            x = (self.M_lu_solve(np.real(rhs).astype(self.dtype)) + 1j * self.M_lu_solve(np.imag(rhs).astype(self.dtype)))
+        else:
+            x = self.M_lu_solve(rhs.astype(self.dtype))
+
+        # Extremely verbose output for debugging:
+        # stderr.write('MatVecMul %d done (%.3f ms). %.3f ms between calls\n'
+        #              % (self.n_mult, (rtime() - t0) * 1000, (t0 - self.pref_time) * 1000))
+
+        # Time between calls is about 99% due to ARPACK for standard hermitian eigenvalue problem:
+        self.total_t_arpack += t0 - self.pref_time
+        self.total_t_lu_solve += rtime() - t0
+        self.n_mult += 1  # Count matvec multiplications = ARPACK.iterations
+        self.pref_time = rtime()
+        return x
+
+
+class DiagSparseSolver:
+    """Template class for sparse matrix eigensolvers."""
+    def __init__(self, num_processes, num_threads, neig, worker_type = None, **ignored_opts):
+        self.neig = neig
+        self.num_processes = num_processes
+        self.num_threads = num_threads
+        self.lasteivec = None
+        self.reuse_eivec = False
+        self.verbose = "verbose" in cmdargs.sysargv
+        self.dtype = np.complex128
+        self.eival_accuracy = 1e-6
+        self.worker_type = worker_type
+        # The value handle_sigchld tells the TaskManager to redefine SIGCHLD to
+        # terminate on that signal. This is needed to make multiprocessing
+        # handle the case that a child process dies in a graceful way. However,
+        # this can interfere with some (external) solvers, like jax. For those
+        # solvers, handle_sigchld should be set to False. The value does not
+        # affect behaviour on Windows.
+        self.handle_sigchld = True
+        # Setting the thread number here (after import) is too late for some
+        # libraries. Therefore, this has already been set before import.
+        # However, some libraries can handle a change of thread numbers (others
+        # just ignore this), so it does not hurt to set current values here
+        # again.
+        environ['OMP_NUM_THREADS'] = '%d' % num_threads
+        environ['MKL_NUM_THREADS'] = '%d' % num_threads
+
+    def solve(self, mat):
+        """Solves the configured eigenvalue problem for matrix given by 'mat'.
+        Returns: (eigenvalues, eigenvectors)."""
+        raise NotImplementedError("Class 'DiagSparseSolver' has no implementation for 'solve'. Use a child class instead.")
+
+
+class EighSolver(DiagSparseSolver):
+    def __init__(self, num_processes, num_threads, **kwds):
+        super().__init__(num_processes, num_threads, 0, **kwds)
+
+    def solve(self, mat):
+        dense_mat = mat.toarray() if issparse(mat) else mat
+        eival, eivec = eigh(dense_mat)
+        return eival, eivec
+
+
+class FeastSolver(DiagSparseSolver):
+    # Wrapper around the external FEAST solver (module fs): finds all
+    # eigenvalues in the energy window [minval, maxval] and adapts the
+    # requested subspace size automatically.
+    def __init__(self, num_processes, num_threads, neig, minval, maxval, **kwds):
+        # minval, maxval: energy search interval handed to fs.feastsolve().
+        super().__init__(num_processes, num_threads, neig, **kwds)
+        self.minval = minval
+        self.maxval = maxval
+
+    def solve(self, mat):
+        """Run FEAST on mat, retrying up to four times while adjusting neig.
+
+        Returns (eival, eivec); (None, None) if no eigenvalue lies in the
+        energy range. If convergence ultimately fails, the last (possibly
+        unreliable) result is returned after a warning.
+        """
+        max_tries = 4
+        eival, eivec = None, None
+        for i in range(max_tries):
+            if i > 0:
+                stderr.write("Retrying: %d/%d\n" % (i + 1, max_tries))
+            if not self.reuse_eivec:
+                self.lasteivec = None
+            eival, eivec, lasteivec, info = fs.feastsolve(mat, self.minval, self.maxval, self.neig, self.lasteivec,
+                                                          verbose=self.verbose)
+            # info flags (see messages below): 0 success, 1 empty range,
+            # 2 no convergence, 3 subspace too small.
+            if info == 0:
+                # Optimization for best speed: Set the requested amount of
+                # eigenvalues for following iterations (feastsolver
+                # automatically uses a optimal subspace size). Only increase if
+                # there are substantially more eigenvalues (as this triggers a
+                # reset of the eigenvector subspace).
+                if len(eival) < self.neig or len(eival) > 1.1 * self.neig:
+                    self.neig = len(eival)
+                    stderr.write("Info (FeastSolver): Automatically resizing subspace for optimal performance. "
+                                 "New number of requested eigenstates for next use of this solver: %d.\n" % self.neig)
+                self.lasteivec = lasteivec
+                return eival, eivec
+            if info == 1:
+                stderr.write("Warning (FeastSolver): No eigenvalue found in energy range.\n")
+                return None, None
+            if info == 2:
+                stderr.write("Warning (FeastSolver): Did not converge. This can have multiple issues. "
+                             "Try adjusting the energy range or requested eigenvalues.\n")
+                self.lasteivec = None  # Reset the input eivec for safety
+                break
+            if info == 3:
+                stderr.write("Warning (FeastSolver): The amount of requested eigenvalues is too small. "
+                             "Automatically doubling requested eigenvalues for following calculations.\n")
+                self.neig *= 2
+                stderr.write("New value: %d.\n" % self.neig)
+            # All other flags are ignored or raise an error anyway.
+        stderr.write("Warning (FeastSolver): Output eigenstates are unreliable. Please check.\n")
+        return eival, eivec
+
+
+class EigshSolver(DiagSparseSolver):
+    """Implements the default scipy eigsh solver."""
+    def __init__(self, num_processes, num_threads, neig, targetval, **kwds):
+        super().__init__(num_processes, num_threads, neig, **kwds)
+        self.targetval = targetval
+
+    def solve(self, mat):
+        eival, eivec = eigsh(mat, self.neig, sigma=self.targetval, v0=self.lasteivec if self.reuse_eivec else None)
+        if self.reuse_eivec:
+            self.lasteivec = eivec[:, 0]
+        return eival, eivec
+
+class EigshMultiSolver(DiagSparseSolver):
+    """Implements the default scipy eigsh solver."""
+    def __init__(self, num_processes, num_threads, neig, targetval, **kwds):
+        super().__init__(num_processes, num_threads, neig, **kwds)
+        self.targetval = targetval
+
+    def solve(self, mat):
+        if len(self.targetval) == 0:
+            raise ValueError
+        eival, eivec = eigsh(mat, self.neig, sigma=self.targetval[0])
+        intervals = [intervaltools.from_eivals(eival, self.targetval[0])]
+        if len(self.targetval) == 1:
+            return eival, eivec
+        ddp = DiagDataPoint(0, eival, eivec).sort_by_eival(inplace=True)
+        for targetval in self.targetval[1:]:
+            eival, eivec = eigsh(mat, self.neig, sigma=targetval)
+            ddp.extend_by(0, eival, eivec).sort_by_eival(inplace=True)
+            intervals.append(intervaltools.from_eivals(eival, targetval))
+        intervals = intervaltools.normalize(intervals)
+
+        if len(intervals) > 1:
+            stderr.write("ERROR (EigshMultiSolver.solve): Disconnected eigenvalue ranges: " + ", ".join(["[%.3f, %.3f]" % i for i in intervals]) + ".\n")
+            exit(1)  # TODO: Handle this exception more gently?
+        return ddp.eival, ddp.eivec
+
+class CustomShiftInvEigshSolver(EigshSolver):
+    """Implements the default scipy eigsh solver with a configurable shift-invert factorization."""
+    def __init__(self, num_processes, num_threads, neig, targetval, shift_invert_solver, **kwds):
+        super().__init__(num_processes, num_threads, neig, targetval, **kwds)
+        self.shift_invert_solver = shift_invert_solver
+
+    def solve(self, mat):
+        mat -= self.targetval * eye(mat.shape[0])
+        shift_invert_factorization = FastSpLuInv(mat, solver=self.shift_invert_solver)
+        eival, eivec = eigsh(mat, self.neig, sigma=self.targetval, v0=self.lasteivec, OPinv=shift_invert_factorization)
+        if self.reuse_eivec:
+            self.lasteivec = eivec[:, 0]
+        return eival, eivec
+
+
+class CupyShiftInvEigshSolver(CustomShiftInvEigshSolver):
+    """Implements the cupy version of eigsh solver running on CUDA capable GPUs
+    with  a configurable shift-invert factorization."""
+    def __init__(self, *args, dtype = 'single', **kwds):
+        super().__init__(*args, **kwds)
+        # Do not reuse eivecs. Tests yield no better performance,
+        # just more problems (thread safety!)
+        self.reuse_eivec = False
+        if dtype == 'single':
+            self.dtype = np.complex64
+            self.eival_accuracy = 1e-3
+        else:
+            self.dtype = np.complex128
+        # Threshold for successful single precision gemm operation (empirical value):
+        self.gemm_dim_threshold = get_config_num_auto('diag_solver_cupy_gemm_dim_thr')
+        if self.gemm_dim_threshold is None:
+            self.gemm_dim_threshold = 4e6
+
+    def solve(self, mat):
+        """Specialized solve implementation for optimal GPU performance.
+        Two algorithms are available:
+        - double precision solver: Just calls the cupy eigsh method for the full problem set in double precision.
+        - single precision solver: Makes best use of Nvidia GPUs' TensorCores (starting from Volta generation models).
+            To prevent numerical errors due to the reduced float precision and range, the eigenvalue problem is split
+            into multiple smaller solves. Preparing a new shift-invert LU decomposition is rather fast. """
+        # Number of additional Lanczos construction vectors. Use a little more
+        # than default recommendation to improve numerical stability and
+        # precision:
+        add_lzc_vec = 1.5 * self.neig
+        n_lzc_vec = self.neig + add_lzc_vec  # Full vector space
+        mul_thresh = mat.shape[0] * self.neig / self.gemm_dim_threshold
+        if self.dtype == np.complex64 and mul_thresh > 1:
+            n_lzc_vec = max(int(n_lzc_vec / mul_thresh), 40)
+            # smaller neig subset, but always request a minimum set size
+            neig = max(int(self.neig / mul_thresh), 10)
+        else:
+            neig = int(self.neig)
+            n_lzc_vec = int(n_lzc_vec)
+        target = self.targetval
+        try:
+            eival, eivec = self._solve(mat, target, neig, n_lzc_vec)
+        except (ArithmeticError, TimeoutError) as err:
+            # TODO: Consider flagging this as high load task and reschedule.
+            # TaskManager should make sure only limited number of high load
+            # tasks is active at once (not yet implemented)
+            if self.verbose and self.dtype == np.complex64:
+                stderr.write(str(err) + "\nFalling back to double precision solver (init).\n")
+            eival, eivec = self._fallback(mat, err)
+        ddp = DiagDataPoint(0, eival, eivec).sort_by_eival(inplace=True)
+        prev_target = np.array([target, target])  # holds next targets in down/up direction, initial value
+        range_factor = np.array([1.1, 1.1])
+        prefactor = np.array([-1, 1])
+        allowed_no_overlap = [False, False]
+        for _ in range(int(5 * mul_thresh) + 20):  # this will only execute with single precision, because
+            if not ddp.neig < self.neig:  # we abort this loop, as soon as enough eigenvalues are found
+                break
+            # extend search to both ends: From previous target value,
+            # set new target to lie beyond the maximum distance to newly found eigenvalues
+            # This should also cover cases where only values on one side of the original target are found (rare case)
+            next_target = np.abs(np.array([ddp.eival.min(), ddp.eival.max()])-prev_target) * prefactor * range_factor + prev_target
+            nt = np.argmin(np.abs(next_target-self.targetval))  # only step in the direction that is closer to targetval
+            prev_neig = ddp.neig
+            try:
+                eival, eivec = self._solve(mat, next_target[nt], neig, n_lzc_vec)
+            except (ArithmeticError, TimeoutError) as err:
+                if self.verbose:
+                    stderr.write(str(err) + "\nFalling back to double precision solver (%d found).\n" % prev_neig)
+                eival, eivec = self._fallback(mat, err)
+                ddp = DiagDataPoint(0, eival, eivec).sort_by_eival(inplace=True)
+                break  # return with double precision solve
+                # continue  # No convergence, we just retry
+            t0 = rtime()
+            # Problem with DDP.extend_by: Accuracy of shift-invert is lower, the
+            # further away from target. With single precision, this can be quite
+            # bad, and we are not able to clearly identify single eigenvalues
+            # just by value and remove overlapping duplicates.
+            # Solution: Check min/max values for overlap. Then shift sorted
+            # eivals against each other to find best total eigenvalue match
+            # (similar to bandalign algorithm, reusable?). Then we calculate the
+            # mean from both sets of overlapping eigenvalues, weighted by each
+            # point's distance to the target values.
+            try:
+                temp_ddp = ddp.stitch_with(0, eival, eivec, prev_target[nt], next_target[nt])
+                n_new_eival = temp_ddp.neig - prev_neig  # With overlap, duplicates will be removed
+                overlap = len(eival) - n_new_eival
+                if overlap < 5 and not allowed_no_overlap[nt]:
+                    raise NoOverlapError("Overlap of solutions too small (%d)." % (len(eival) - n_new_eival))
+            except NoOverlapError as err:
+                if allowed_no_overlap[nt]:
+                    temp_ddp = ddp.subset(range(ddp.neig))  # get copy
+                    temp_ddp.extend_by(0, eival, eivec).sort_by_eival(inplace=True)
+                else:  # No overlap detected, reduce stepping for next iteration
+                    # TODO: With improved stitching (results present on both
+                    # sides), we'd be able to use also this result. With the
+                    # current implementation, we have to throw it away.
+                    range_factor[nt] = max(range_factor[nt] - 0.1, 0.95)
+                    if self.verbose:
+                        stderr.write("    " + str(err) + '\n')
+                    continue
+            except SubsetError as err:  # New solution is subset of previous.
+                # Just continue as normal. Next target value will be further away.
+                if self.verbose:
+                    stderr.write("    " + str(err) + ". Target was %.3g.\n" % next_target[nt])
+                    stderr.write("    Prev: %d from %.3g to %.3g, New: %d from %.3g to %.3g.\n" %
+                                 (ddp.neig, ddp.eival.min(), ddp.eival.max(), len(eival), eival.min(), eival.max()))
+                    stderr.write("    Range factor: %.3f\n" % range_factor[nt])
+                # We know that there will be no states in the current solution
+                # interval beyond the current borders. Therefore, we reset the
+                # prev_target value to increase the step size in the next
+                # iteration, but only if this is the first of each series of
+                # subset errors (overlap not allowed):
+                prev_target[nt] = next_target[nt] if allowed_no_overlap else eival.min() if nt == 1 else eival.max()
+                range_factor[nt] = 0.95  # May not be larger then 1, otherwise it'd be possible to miss states.
+                allowed_no_overlap[nt] = True  # Next step in this direction is very likely to have no overlap
+                continue
+            except ValueError as err:
+                # Stitching can not return valid solution. Most likely cause is
+                # that the overlap contains only fully degenerate solutions.
+                # We could try to increase the overlap by adjusting the target
+                # value or the number of requested eigenstates, but this issue
+                # does not occur often, so it's easier and safer to fall back to
+                # double precision directly.
+                if self.verbose:
+                    stderr.write(str(err) + "\nFalling back to double precision solver (%d found).\n" % prev_neig)
+                eival, eivec = self._fallback(mat, err)
+                ddp = DiagDataPoint(0, eival, eivec).sort_by_eival(inplace=True)
+                break  # return with double precision solve
+            if n_new_eival < 0.85 * len(eival) and overlap > 10:
+                range_factor[nt] += 0.2
+            if self.verbose:
+                stderr.write("    %d new eigenstates. Stitched in %.3g s\n" % (n_new_eival, rtime()-t0))
+                stderr.write("    Prev: %d from %.3g to %.3g, New: %d from %.3g to %.3g.\n" %
+                             (ddp.neig, ddp.eival.min(), ddp.eival.max(), len(eival), eival.min(), eival.max()))
+                stderr.write("    Range factor: %.3f\n" % range_factor[nt])
+            prev_target[nt] = next_target[nt]
+            ddp = temp_ddp
+        if self.verbose and ddp.neig < self.neig:
+            stderr.write("FOUND ONLY %d eivecs.\n" % ddp.neig)
+        return ddp.eival, ddp.eivec
+
+    def _fallback(self, mat, error):
+        """Step by step fallback routine:
+        - Is called if single precision solver fails to converge.
+        - Tries the double precision version for the full sized problem.
+        - Should this also fail, create a (stable) CPU solver instance
+            and solve this problem on CPU only"""
+        try:
+            if self.dtype == np.complex64:
+                # Try double precision GPU solver
+                eival, eivec = self._solve(mat, self.targetval, self.neig, int(2 * self.neig), dtype=np.complex128)
+            else:
+                # Reraise and catch the error to fall back to CPU solver
+                raise error
+        except TimeoutError as err:
+            if self.verbose:
+                stderr.write("Double precision GPU solver failed: " + str(err) + "\nFalling back to CPU solver.\n")
+            cpu_solver = CustomShiftInvEigshSolver(self.num_processes, self.num_threads,
+                                                   self.neig, self.targetval, self.shift_invert_solver)
+            eival, eivec = cpu_solver.solve(mat)
+        return eival, eivec
+
+    def _solve(self, mat, target, neig, n_lzc_vec, dtype=None):
+        """Actual solve implementation: Configure shift-invert solver and use cupy eigsh."""
+        shift_mat = mat - target * eye(mat.shape[0])
+        shift_mat = shift_mat.astype(self.dtype if dtype is None else dtype)
+        shift_invert_factorization = CupyFastSpLUInv(shift_mat, solver=self.shift_invert_solver, neig=n_lzc_vec)
+        eival, eivec = cupy_eigsh(shift_invert_factorization, int(neig), ncv=int(n_lzc_vec))
+        eival = (1.0 / eival.get().astype(np.double)) + target  # revert shift-invert of eigenvalues
+        return eival, eivec.get().astype(np.complex128)
+
+class JaxEighSolver(CustomShiftInvEigshSolver):
+    def __init__(self, *args, **kwds):
+        super().__init__(*args, **kwds)
+
+        self.gpu_select = 0
+        # Do not treat SIGCHLD as 'terminate', as this would interfere with
+        # regular operation on jax.
+        self.handle_sigchld = False
+
+    """Implements the JAX eigh solver for spare matrices."""
+    def solve(self, mat):
+        try:
+            mat = mat.todense()
+        except AttributeError:  # Likely already a spare matrix
+            pass
+
+        # Distribute load across all available GPUs using
+        # a Round Robin approach.
+        gpu_select = self.gpu_select
+        self.gpu_select += 1
+        if self.gpu_select >= len(jax.devices("gpu")):
+            self.gpu_select = 0
+
+        with jax.default_device(jax.devices("gpu")[gpu_select]):
+            eival, eivec = jnp.linalg.eigh(mat)
+
+        # If self.neig is specified only return around self.targetval
+        # This is for compatablity with the existing implementation.
+        if self.neig is not None:
+            target_index = jnp.searchsorted(eival, self.targetval)
+            if target_index >= len(eival) - (self.neig // 2):
+                # targetval either larger than all eivals or almost larger
+                eival = eival[-self.neig:]
+                eivec = eivec[:,-self.neig:]
+            elif target_index - (self.neig // 2) <= 0:
+                # targetval either smaller than all eivals or almost smaller
+                eival = eival[:self.neig]
+                eivec = eivec[:,:self.neig]
+            else:
+                # targetval somewhere in the middle of the array
+                eival = eival[(target_index - self.neig // 2):(target_index + self.neig - self.neig // 2)]
+                eivec = eivec[:,(target_index - self.neig // 2):(target_index + self.neig - self.neig // 2)]
+
+        return np.array(eival), np.array(eivec)
+
+class CupyFastSpLUInv(CupyLinOp if HAS_CUPY else LinearOperator):
+    """
+    SpLuInv:
+       Helper class to repeatedly solve M*x = rhs
+       using a sparse LU-decomposition of mat M.
+       Do this on GPU using cupy CUDA library interface
+       or on CPU with GPU<>CPU RAM transfers (faster in all tests so far).
+
+       Note: If cupy package is not present, this class is redefined
+       (but not used) as scipy based class to prevent import errors.
+    """
+    def __init__(self, mat, dtype = None, solver='cupy', neig=0):
+        # Factorize mat once; _matvec reuses the factorization.
+        # mat:    sparse matrix to factorize (typically the shifted Hamiltonian).
+        # dtype:  operator dtype exposed to the Lanczos iteration; defaults to mat.dtype.
+        # solver: 'cupy', 'umfpack', 'superlu' or 'pardiso'; raises ValueError otherwise.
+        # neig:   number of Lanczos vectors; used for the slow-convergence abort check.
+        super().__init__(mat.dtype if dtype is None else dtype, mat.shape)
+        self.mat_dtype = mat.dtype
+        t0 = rtime()
+        if solver == 'cupy':
+            # Factorize once on CPU (uses scipy default SuperLU),
+            # solve multiple times on GPU
+            cu_mat = cupy_csr_matrix(mat)
+            self.M_lu_solve = cupy_factorized(cu_mat)
+        elif solver == 'umfpack':
+            # Factorize and solve on CPU (uses scipy default UMFPACK/SuperLU)
+            self.M_lu_solve = factorized(mat)
+        elif solver == 'superlu':
+            # Chooses SuperLU directly.
+            self.M_lu_solve = splu(mat).solve
+        elif solver == 'pardiso':
+            # mtype=-4: same PARDISO matrix-type code as in FastSpLuInv.
+            pardiso_invert = pyMKL.pardisoSolver(mat, mtype=-4)
+            pardiso_invert.factor()
+            self.M_lu_solve = pardiso_invert.solve
+            self.solver_handle = pardiso_invert
+        else:
+            raise ValueError('Solver %s not supported' % solver)
+        self.solver = solver
+        # All backends except 'cupy' solve on the CPU (with GPU<>CPU transfers).
+        self.use_cpu = solver != 'cupy'
+        # Counters/accumulators for the timing statistics reported in __del__.
+        self.n_mult = 0
+        self.pref_time = rtime()
+        self.total_t_lanczos = 0
+        self.total_t_lu_solve = 0
+        self.verbose = "verbose" in cmdargs.sysargv
+        self.neig = neig
+        # Maximum Lanczos iterations per eigenvalue before aborting (config value).
+        self.lanczos_iter = get_config_num('diag_solver_cupy_iterations', minval=1)
+        if self.verbose:
+            stderr.write("Initialized and factorized %s LU solver in %.3g s.\n" % (solver, rtime()-t0))
+
+    def __del__(self):
+        # Release the PARDISO handle (if any) and report timing statistics.
+        try:
+            if self.solver == 'pardiso':
+                self.solver_handle.clear()  # Clear memory of pardiso instance
+            if self.verbose and self.n_mult > 0:
+                if self.solver == 'cupy':
+                    # As both parts run asynchronously on the GPU, we can not use CPU based
+                    # timing here to distinguish both times. Just print the total time:
+                    stderr.write('Total Lanczos + LU-Inv solve %.3g s. Used %d MatVec operations.\n'
+                                 % (self.total_t_lanczos + self.total_t_lu_solve, self.n_mult))
+                else:
+                    stderr.write('Total Lanczos %.3g s (%.3g %%); Total LU-Inv solve %.3g s. Used %d MatVec operations.\n'
+                                 % (self.total_t_lanczos, 100 * self.total_t_lanczos /
+                                    (self.total_t_lu_solve + self.total_t_lanczos),
+                                    self.total_t_lu_solve, self.n_mult))
+        except AttributeError:
+            # In some rare cases (e.g. memory allocation errors), this instance
+            # will not have all (valid) attributes. This would throw additional
+            # distracting secondary error messages, therefore, we suppress those
+            # errors completely. The verbose output might also be suppressed, but
+            # would not be of much use in this case anyway.
+            pass
+
+    def _matvec(self, rhs):
+        """Custom definition of matrix-vector product M^-1 * rhs, where
+        the matrix M^-1 would be the virtual inverse of the original M (not actually calculated)
+        and the Vector rhs (usually an eigenvector candidate).
+
+        Arguments:
+        rhs   Vector (cupy array) supplied by the iterative eigensolver.
+
+        Returns:
+        Solution x of M x = rhs as a cupy array, cast to self.dtype.
+
+        Raises:
+        ArithmeticError   If rhs contains non-finite values (range overflow).
+        TimeoutError      If the matvec call count indicates the eigensolver
+                          converges too slowly (see threshold below).
+        """
+        sleep(0)  # make this thread yield control to others, as we have to wait anyway
+        if not cupy.all(cupy.isfinite(rhs)):
+            raise ArithmeticError("Vector contains non-finite values. Range Overflow?")
+        if self.use_cpu:
+            # CPU backends need a host copy of the device vector, in the matrix dtype
+            rhs_cpu = cupy.asnumpy(rhs).astype(self.mat_dtype)
+        else:
+            rhs = rhs.astype(self.mat_dtype)
+        t0 = rtime()
+        # Time elapsed since the end of the previous call is attributed to the
+        # Lanczos iteration running between matvec calls.
+        t_lanczos = t0 - self.pref_time
+        self.total_t_lanczos += t_lanczos
+        t0 = rtime()
+        self.n_mult += 1  # Count matvec multiplications = Lanzcos iterations
+        x = self.M_lu_solve(rhs_cpu if self.use_cpu else rhs)
+        if self.neig is not None and (self.n_mult - 5) // self.neig >= self.lanczos_iter:
+            # stderr.write("Solver converges slowly! %d MatVecMult used. Aborting...\n" % self.n_mult)
+            raise TimeoutError("Did not converge.")
+        sleep(0)  # make this thread yield control to others, as we have to wait anyway
+        # Extremely verbose debug output:
+        # stderr.write('MatVecMul %d done (%.3f ms). %.3f ms between calls\n'
+        #              % (self.n_mult, (rtime() - t0) * 1000, t_lanczos * 1000))
+        self.total_t_lu_solve += rtime() - t0
+        self.pref_time = rtime()
+        if self.use_cpu:
+            return cupy.asarray(x, dtype=self.dtype)
+        return x.astype(self.dtype)
+
+def solverconfig(num_cpus, modelopts, script = None):
+    """Chooses a suitable sparse eigenvalue solver and sets its configuration.
+    Returns a DiagSparseSolver instance.
+
+    Arguments:
+    num_cpus    Integer. Number of workers available for parallelization.
+    modelopts   Dict instance. Must contain 'neig'; most solvers additionally
+                require 'energy', the FEAST solver requires 'erange'. Note:
+                'neig' is renormalized IN PLACE when 'll_mode' is present and
+                not 'full'.
+    script      String or None. Name of the calling script; used to pick a
+                sensible default when 'diag_solver' is set to 'automatic'.
+
+    Returns:
+    A solver instance (FeastSolver, CupyShiftInvEigshSolver, JaxEighSolver,
+    CustomShiftInvEigshSolver, EigshMultiSolver, or EigshSolver).
+
+    Raises:
+    KeyError     If a required entry is missing from modelopts.
+    ValueError   If the configured solver name is not recognized.
+    """
+    global feast_not_found_warning, no_pardiso_warning, no_cupy_warning, cupy_warning_2d, no_jax_warning
+    if 'neig' not in modelopts:
+        raise KeyError("Number of eigenvalues ('neig') not defined in modelopts")
+    all_solvers = [
+        'auto', 'automatic', 'feast', 'cupy_eigsh', 'jax_eigh', 'eigsh', 'superlu_eigsh',
+        'umfpack_eigsh', 'pardiso_eigsh'
+    ]
+    solver_config = get_config('diag_solver', choices = all_solvers).lower()
+    # Resolve 'auto'/'automatic' to a concrete solver, based on the calling
+    # script and on which optional backends (PARDISO, CUPY) are importable.
+    if solver_config in ['auto', 'automatic']:
+        if script in ['kdotpy-1d.py']:
+            if HAS_PARDISO:
+                solver_config = 'pardiso_eigsh'
+            else:
+                solver_config = 'eigsh'
+                stderr.write("'diag_solver' hint: PARDISO can improve solution speed, but is not available. Consider installing packages 'MKL' and 'pyMKL' (see Wiki).\n")
+            if HAS_CUPY:
+                stderr.write("'diag_solver' hint: Solver 'cupy_eigsh' is available and could improve performance under certain conditions. Double precision recommended.\n")
+        elif script in ['kdotpy-ll.py'] and HAS_CUPY and modelopts['ll_mode'] == 'full' and modelopts['neig'] >= 200:
+            solver_config = 'eigsh'
+            stderr.write("'diag_solver' hint: Solver 'cupy_eigsh' is available and could improve performance under certain conditions. Single precision recommended.\n")
+        else:
+            solver_config = 'eigsh'
+        stderr.write(f"'diag_solver' configuration 'automatic': Choosing '{solver_config}'")  # to be continued
+        stderr.write(".\n" if script is None else f" for script '{script}'.\n")
+
+    # Solvers on GPU backends (cupy/jax) default to thread workers; all other
+    # solvers default to process workers.
+    worker_type = get_config('diag_solver_worker_type', choices = ['auto', 'automatic', 'process', 'thread']).lower()
+    if worker_type in ['auto', 'automatic']:
+        if 'cupy' in solver_config or 'jax' in solver_config:
+            worker_type = 'thread'
+        else:
+            worker_type = 'process'
+        stderr.write("'diag_solver_worker_type' configuration 'automatic': Choosing '%s' for solver '%s'.\n" % (worker_type, solver_config))
+
+    if 'll_mode' in modelopts and modelopts['ll_mode'] != 'full':
+        # Renormalize total number of eigenvalues
+        # NOTE(review): this modifies modelopts['neig'] in place, and the
+        # warning prefix references lldiagonalization.hll; message text left
+        # unchanged here.
+        if int(np.ceil(modelopts['neig'] / modelopts['ll_max'])) < 6:
+            stderr.write("Warning (lldiagonalization.hll): Requested number of eigenstates leads to < 6 eigenstates per LL index. Use minimum of 6 states per LL index instead.\n")
+        modelopts['neig'] = int(np.ceil(modelopts['neig'] / modelopts['ll_max']))
+
+    num_threads = cmdargs.threads()
+
+    if solver_config == 'feast':
+        if not HAS_FEAST:
+            # Fall back to 'eigsh'; warn only on the first occurrence.
+            if feast_not_found_warning:
+                stderr.write("Warning (diagsolver): FEAST solver could not be loaded. Please make the Intel MKL available. Falling back to legacy solver (scipy.sparse.linalg.eigsh).\n")
+                feast_not_found_warning = False  # only issue once
+            solver_config = 'eigsh'
+        else:
+            if 'erange' not in modelopts:
+                raise KeyError("Target energy range ('erange') not defined in modelopts")
+            emin, emax = modelopts['erange']
+            return FeastSolver(num_cpus, num_threads if num_threads is not None else 1,
+                               modelopts['neig'], emin, emax, worker_type = worker_type)
+
+    # All shift-invert ('eigs...') solvers need a target energy.
+    if 'eigs' in solver_config and 'energy' not in modelopts:
+        raise KeyError("Target energy ('energy') not defined in modelopts")
+
+    if solver_config == 'cupy_eigsh':
+        if not HAS_CUPY:
+            if no_cupy_warning:
+                stderr.write("Warning (diagsolver): CUDA solver could not be loaded. Please check your CUPY package. Falling back to legacy CPU 'eigsh' solver (with UMFPACK/SuperLU).\n")
+                no_cupy_warning = False  # only issue once
+            solver_config = 'umfpack_eigsh'
+            worker_type = 'process'
+        else:
+            # Set the float data type (precision) for calculations using the cupy solver.
+            cupy_dtype = get_config('diag_solver_cupy_dtype', choices = ['single', 'double']).lower()
+            if script == 'kdotpy-2d.py':
+                if cupy_warning_2d:
+                    stderr.write(f"Warning (diagsolver): Using the CUDA solver with {script} occasionally fails with eigenvalues at incorrect energies. Check your results and choose a different solver if you notice problems.\n")
+                    cupy_warning_2d = False  # only issue once
+
+            # The LU factorization backend for the shift-invert step:
+            # PARDISO if available, otherwise UMFPACK/SuperLU.
+            if not HAS_PARDISO:
+                if no_pardiso_warning:
+                    stderr.write("Warning (diagsolver): PARDISO solver could not be loaded. Please make the Intel MKL available. Falling back to legacy shift invert factorization solver (UMFPACK/SuperLU).\n")
+                    no_pardiso_warning = False  # only issue once
+                shift_invert_solver = 'umfpack'
+            else:
+                shift_invert_solver = 'pardiso'
+            return CupyShiftInvEigshSolver(
+                num_cpus, num_threads if num_threads is not None else 1,
+                modelopts['neig'], modelopts['energy'], shift_invert_solver,
+                dtype = cupy_dtype, worker_type = worker_type)
+
+    if solver_config == 'jax_eigh':
+        if not HAS_JAX:
+            if no_jax_warning:
+                stderr.write("Warning (diagsolver): JAX solver could not be loaded. Please check your JAX package. Falling back to legacy CPU 'eigsh' solver (with UMFPACK/SuperLU).\n")
+                no_jax_warning = False  # only issue once
+            solver_config = 'umfpack_eigsh'
+            worker_type = 'process'
+        else:
+            return JaxEighSolver(num_cpus, num_threads if num_threads is not None else 1,
+                                 modelopts['neig'], modelopts['energy'], None, worker_type = worker_type)
+
+    if solver_config == 'pardiso_eigsh':
+        if not HAS_PARDISO:
+            if no_pardiso_warning:
+                stderr.write("Warning (diagsolver): PARDISO solver could not be loaded. Please make the Intel MKL available. Falling back to legacy shift invert factorization solver (UMFPACK/SuperLU).\n")
+                no_pardiso_warning = False  # only issue once
+            solver_config = 'superlu_eigsh'
+        else:
+            return CustomShiftInvEigshSolver(num_cpus, num_threads if num_threads is not None else 1,
+                                             modelopts['neig'], modelopts['energy'], 'pardiso',
+                                             worker_type = worker_type)
+    if solver_config == 'umfpack_eigsh':
+        # Falls back to 'superlu_eigsh' silently, if UMFPACK is not available
+        return CustomShiftInvEigshSolver(num_cpus, num_threads if num_threads is not None else 1,
+                                         modelopts['neig'], modelopts['energy'], 'umfpack',
+                                         worker_type = worker_type)
+
+    if solver_config == 'superlu_eigsh':
+        # This is essentially equal to 'eigsh', but enables us to get some more detailed timing information.
+        return CustomShiftInvEigshSolver(num_cpus, num_threads if num_threads is not None else 1,
+                                         modelopts['neig'], modelopts['energy'], 'superlu',
+                                         worker_type = worker_type)
+
+    if solver_config == 'eigsh':
+        # A list of target energies yields one eigsh run per energy.
+        if isinstance(modelopts['energy'], list):
+            return EigshMultiSolver(num_cpus, num_threads if num_threads is not None else 1,
+                                    modelopts['neig'], modelopts['energy'], worker_type = worker_type)
+        else:
+            return EigshSolver(num_cpus, num_threads if num_threads is not None else 1,
+                               modelopts['neig'], modelopts['energy'], worker_type = worker_type)
+    else:
+        raise ValueError("Invalid value for variable solver_config")
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/feastsolver.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/feastsolver.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9bb4d1b2dbb23222a1ddb71e1984ad84cb9dee2
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/feastsolver.py
@@ -0,0 +1,193 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from ctypes import byref, c_int32, c_double, c_char, cdll, POINTER
+import numpy as np
+from scipy.sparse import csr_matrix
+
+# region Define data types globally for feastsolver.py
+# Integer type of the FEAST/MKL interface used here (32-bit).
+mkl_int = c_int32
+# Requirements for numpy arrays passed through ctypes: C-ordered and aligned.
+np_ctlib_flags = ['C_CONTIGUOUS', 'ALIGNED']
+# 1D integer arrays: CSR index arrays and the fpm configuration array.
+array_1d_int = np.ctypeslib.ndpointer(
+    dtype=mkl_int,
+    ndim=1,
+    flags=np_ctlib_flags
+)
+# 1D double arrays: eigenvalues and residuals.
+array_1d_double = np.ctypeslib.ndpointer(
+    dtype=c_double,
+    ndim=1,
+    flags=np_ctlib_flags
+)
+# 1D complex arrays: nonzero values of the CSR matrix.
+array_1d_complex = np.ctypeslib.ndpointer(
+    dtype=np.complex128,
+    ndim=1,
+    flags=np_ctlib_flags
+)
+# 2D complex arrays: eigenvector subspace storage.
+array_2d_complex = np.ctypeslib.ndpointer(
+    dtype=np.complex128,
+    ndim=2,
+    flags=np_ctlib_flags
+)
+# endregion
+# Load library
+try:
+    mkl = cdll.LoadLibrary("libfeast_rt.so")
+    FEAST_VERS = 'source'
+except:
+    mkl = cdll.LoadLibrary("libmkl_rt.so")
+    FEAST_VERS = 'intel'
+# region Define functions globally for feastsolver.py
+feastInit = mkl.feastinit_ if FEAST_VERS == 'source' else mkl.feastinit
+feastInit.argtypes = [POINTER(array_1d_int)]
+feastSolve = mkl.zfeast_hcsrev_ if FEAST_VERS == 'source' else mkl.zfeast_hcsrev
+feastSolve.argtypes = [
+    POINTER(c_char),  # uplo (a= U: upper, L: lower, F: full matrix)
+    POINTER(mkl_int),  # n (number nonzero elements)
+    POINTER(array_1d_complex),  # a (nonzero elements)
+    POINTER(array_1d_int),  # ia ([row] index of nonzero elements; + last element = n+1)
+    POINTER(array_1d_int),  # ja ([col] index of nonzero elements)
+    POINTER(array_1d_int),  # fpm (configuration values for solver)
+    POINTER(c_double),  # epsout (OUTPUT: relative error)
+    POINTER(mkl_int),  # loop (OUTPUT: number of refinement loops)
+    POINTER(c_double),  # emin (minimum of search interval)
+    POINTER(c_double),  # emax (maximum of search interval)
+    POINTER(mkl_int),  # m0 (guess for total numbers of ev)
+    POINTER(array_1d_double),  # e (OUTPUT: eval, first m of m0 filled)
+    POINTER(array_2d_complex),  # x (OUTPUT: evec per column, first m of m0 filled)
+    POINTER(mkl_int),  # m (OUTPUT: number of found ev)
+    POINTER(array_1d_double),  # res (OUTPUT: residual vector)
+    POINTER(mkl_int)
+    # info (OUTPUT: https://software.intel.com/content/www/us/en/develop/documentation/mkl-developer-reference-c/top/extended-eigensolver-routines/extended-eigensolver-interfaces-for-eigenvalues-within-interval/extended-eigensolver-output-details.html#extended-eigensolver-output-details_GUID-E1DB444D-B362-4DBF-A1DF-DA68F7FB7019)
+]
+# endregion
+# region Init feast variables
+fpm = np.zeros((128,), dtype=mkl_int)  # Array holds basic FEAST solver config parameters (see doc)
+feastInit(fpm.ctypes.data_as(POINTER(array_1d_int)))
+matType = c_char(b'F')  # specifies matrix type, use F for full matrix
+epsout = c_double(-1)  # output buffer, holds eps value (relative error) after calculation finishes
+
+
+# endregion
+
+
+def feastsolve(mat, emin, emax, n_evals, evec=None, verbose=False, check_inputs=False):
+    """Solve the hermitian eigenvalue problem mat x = e x with FEAST.
+
+    Arguments:
+    mat           Matrix; converted to a complex CSR matrix internally.
+    emin, emax    Floats. Bounds of the eigenvalue search interval.
+    n_evals       Integer. Expected number of eigenvalues in the interval;
+                  the subspace size is set to 1.75 times this value.
+    evec          Array or None. Eigenvector subspace from a previous call,
+                  reused as an initial guess if its size is compatible.
+    verbose       True or False. If True, let FEAST print status output.
+    check_inputs  True or False. If True, enable input checking (Intel MKL
+                  only; not documented for FEAST4).
+
+    Returns:
+    evals       Array with the m converged eigenvalues.
+    normalized  Eigenvectors, one per column (transposed view into evec).
+    evec        Raw subspace array, suitable for reuse in a subsequent call.
+    info        Integer FEAST status code (see feastoutputmessage).
+
+    Raises:
+    RuntimeError   If FEAST reports an error code (info < 0 or info >= 100).
+    """
+    fpm[0] = 1 if verbose else 0  # print status
+    fpm[26] = 1 if (check_inputs and FEAST_VERS == 'intel') else 0  # check inputs, only Intel MKL (FEAST4 not documented)
+    # fpm[1] = 8  # N contour points
+    # fpm[2] = 9  # Stopping criterion
+    # fpm[15] = 2  # Use Zolotarev quadrature rules (only FEAST3+)
+    # fpm[42] = 1  # Use IFEAST instead (only FEAST4+)
+    # fpm[44] = 1  # IFEAST BiCGstab accuracy
+    # fpm[45] = 30000  # BiSGstab max iterations
+    n_evals *= 1.75  # size of subspace, use at least the amount of expected eigenvalues, best performance with 1.5-2x size
+    n_evals = int(n_evals)
+    mat = csr_matrix(mat, dtype=np.complex128)  # make sure matrix has correct data format
+    values = mat.data
+    row_ind = np.array(np.append(mat.indptr + 1, mat.nnz + 1), dtype=mkl_int)  # convert to one-based indices
+    col_ind = np.array(mat.indices + 1, dtype=mkl_int)  # convert to one-based indices
+    mat_dim = mkl_int(mat.shape[1])
+    loops = mkl_int(0)
+    emin = c_double(emin)
+    emax = c_double(emax)
+    evals = np.zeros((n_evals,), dtype=c_double)
+    if evec is None:
+        evec = np.zeros((n_evals, mat.shape[1]),
+                        dtype=np.complex128)  # NOTE(review): per the documentation this looks like the wrong dimension order, but only this layout (with the transpose on return) yields correctly normalized eigenvectors -- re-check memory layouts
+        fpm[4] = 0  # use randomly generated subspace (no previous solution available)
+    else:
+        fpm[4] = 1  # use initial subspace (reuses previous solution)
+        diff_len = evec.shape[0] - n_evals
+        if diff_len == 0:
+            pass  # evec sizes still match up, nothing to do
+        elif diff_len > 0:
+            evec = evec[:n_evals, :]  # old subspace was larger, only use first ones (contains converged evecs)
+        else:
+            evec = np.zeros((n_evals, mat.shape[1]),
+                            dtype=np.complex128)  # we could try to append some random vectors to the old evec, but it is safer to let FEAST construct a new subspace
+            fpm[4] = 0  # use randomly generated subspace (no previous solution available)
+    m = mkl_int(0)
+    res_vec = np.zeros((n_evals,), dtype=c_double)
+    info = mkl_int(-1000)
+    n_evals = mkl_int(
+        n_evals)  # size of subspace, use at least the amount of expected eigenvalues, best performance with 1.5-2x size
+
+    feastSolve(byref(matType), byref(mat_dim),
+               values.ctypes.data_as(POINTER(array_1d_complex)),
+               row_ind.ctypes.data_as(POINTER(array_1d_int)),
+               col_ind.ctypes.data_as(POINTER(array_1d_int)),
+               fpm.ctypes.data_as(POINTER(array_1d_int)),
+               byref(epsout), byref(loops), byref(emin), byref(emax), byref(n_evals),
+               evals.ctypes.data_as(POINTER(array_1d_double)),
+               evec.ctypes.data_as(POINTER(array_2d_complex)),
+               byref(m),
+               res_vec.ctypes.data_as(POINTER(array_1d_double)),
+               byref(info)
+               )
+    m = m.value
+    info = info.value
+    if info < 0 or info >= 100:
+        raise RuntimeError("FEAST algorithm encountered error %d: %s\n" % (info, feastoutputmessage(info)))
+    normalized = evec.T[..., :m]
+    return evals[:m], normalized, evec, info
+
+
+def feastoutputmessage(info):
+    outputdict = {
+        "-1": "Internal error for memory allocation.",
+        "-2": "Internal error of the inner system solver. "
+              "Possible reasons: not enough memory for inner linear system solver or inconsistent input.",
+        "-3": "Internal error of the reduced eigenvalue solver.",
+        "-4": "Matrix B is not positive definite.",  # This can not happen for a normal eigenvalue problem (B = eye)
+        "0": "Successful!",
+        "1": "No eigenvalue found in the search interval. "
+             "Either no eigenvalues in range or range is orders of magnitude too large.",
+        "2": "No Convergence (number of iteration loops > %d)." % fpm[3],
+        "3": "Size of the subspace m0 is too small (m0 < m). Increase requested number of eigenvalues.",
+        "4": "Successful return of only the computed subspace after call with fpm[13] = 1.",
+        "200": "Problem with emin, emax (emin ≥ emax).",
+        "201": "Problem with size of initial subspace m0 (m0 ≤ 0 or m0 >n).",
+        "202": "Problem with size of the system n (n ≤ 0)."
+    }
+    if (-4 <= info <= 4) or (200 <= info <= 202):
+        return outputdict["%d" % info]
+    elif 100 <= abs(info) <= 163:
+        return "Problem with the argument fpm[%d] to FEASTs Intel MKL interface." % info - 101 if (info > 0) else -info - 100
+    else:
+        return "Message not defined."
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/lldiagonalization.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/lldiagonalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a5a25736b4c0099200a29930f5faf9a987dbe7c
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/lldiagonalization.py
@@ -0,0 +1,235 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+
+from ..parallel import parallel_apply_enumerate
+from ..momentum import Vector, VectorGrid
+
+from . import diagonalization as diag
+from .diagdata import DiagData
+from .diagsolver import EighSolver
+from . import diagsolver as dsolv
+
+## This module provides wrappers around several LL diagonalization
+## functions. The argument ll_mode selects which method should be
+## used.
+
+# Calculate bands at k = 0
+def hll_k0(ll_mode, ll_max, h_sym, params, modelopts = {}, pot = None, description = None, return_eivec = False, bandtype_warning_level = 1):
+	"""Wrapper for LL diagonalization at zero momentum.
+
+	Arguments:
+	ll_mode        'legacy', 'sym', or 'full'. The LL mode.
+	ll_max         Integer. The largest LL index taken into account.
+	h_sym          SymbolicHamiltonian instance. The Hamiltonian as function of
+	               k+ and k-. This argument is ignored if ll_mode is 'legacy'.
+	params         PhysParams instance.
+	modelopts      Dict instance. The keywords being passed to diagonalization
+	               and Hamiltonian functions. Not modified (a copy is used).
+	pot            Array. Potential V(z) in meV as function of position.
+	               Overwrites potential in modelopts.
+	description    String. Status message for the progress monitor.
+	return_eivec   True, False or None. If True, keep eigenvector data in the
+	               return value (DiagDataPoint instance). If False, discard
+	               them. If None, discard them only if observables have been
+	               calculated.
+	bandtype_warning_level
+	               0, 1, 2. Whether to show no, some, or all warnings from the
+	               band_types function.
+
+	Returns:
+	DiagDataPoint instance.
+	"""
+	## Default for argument modelopts is not changed, hence safe
+	modelopts_k0 = modelopts.copy()
+	modelopts_k0['return_eivec'] = return_eivec
+	modelopts_k0['ll_mode'] = ll_mode
+	modelopts_k0['ll_max'] = ll_max
+	# Choose and configure a solver for a single (k=0) diagonalization
+	modelopts_k0['solver'] = dsolv.solverconfig(1, modelopts_k0)
+
+	if description is not None:
+		sys.stderr.write(description.strip("\n") + "\n")
+
+	if pot is not None:
+		modelopts_k0['pot'] = pot
+
+	if ll_mode in ['sym', 'full']:
+		diagdata_k0 = diag.hsym_k0(h_sym, params, orbital_magn = 0.0, bandtype_warning_level = bandtype_warning_level, **modelopts_k0)
+	elif ll_mode == 'legacy':
+		if params.lattice_transformed_by_matrix():
+			sys.stderr.write("ERROR (lldiagonalization.hll_k0): Lattice transformation cannot be used in legacy mode.\n")
+			exit(1)
+		diagdata_k0 = diag.hz_k0(params, bandtype_warning_level = bandtype_warning_level, **modelopts_k0)
+	else:
+		raise ValueError("Invalid LL mode")
+
+	if description is not None:
+		sys.stderr.write("1 / 1\n")
+
+	return diagdata_k0
+
+def hll(ll_mode, bs, ll_max, h_sym, params, modelopts = {}, list_kwds = {}, description = None, num_processes = 1):
+	"""Wrapper for LL diagonalization.
+
+	Arguments:
+	ll_mode        'legacy', 'sym', or 'full'. The LL mode.
+	bs             List/array of Vector instances or floats, or a VectorGrid
+	               instance. The magnetic field values.
+	ll_max         Integer. The largest LL index taken into account.
+	h_sym          SymbolicHamiltonian instance. The Hamiltonian as function of
+	               k+ and k-. This argument is ignored if ll_mode is 'legacy'.
+	params         PhysParams instance.
+	modelopts      Dict instance. The keywords being passed to diagonalization
+	               and Hamiltonian functions. Not modified (a copy is used).
+	list_kwds      Dict instance. Keywords that have lists or arrays as values
+	               and are iterated over. That is, if list_kwds['key'] = arr,
+	               apply 'key' as a keyword with value arr[i] for the i'th
+	               point in the grid.
+	description    String. Status message for the progress monitor.
+	num_processes  Integer. Number of processes used in parallelization.
+
+	Returns:
+	DiagData instance.
+	"""
+	## Defaults for arguments modelopts and list_kwds are not changed, hence safe
+	modelopts_bdep = modelopts.copy()
+
+	if ll_mode == 'sym':
+		# Renormalize total number of eigenvalues (per-LL-index count)
+		if int(np.ceil(modelopts_bdep['neig'] / ll_max)) < 6:
+			sys.stderr.write("Warning (lldiagonalization.hll): Requested number of eigenstates leads to < 6 eigenstates per LL index. Use minimum of 6 states per LL index instead.\n")
+		modelopts_bdep['neig'] = int(np.ceil(modelopts_bdep['neig'] / ll_max))
+		data = DiagData(parallel_apply_enumerate(diag.hsym_ll, bs, (ll_max, h_sym, params), f_kwds = modelopts_bdep, fj_kwds = list_kwds, num_processes = num_processes, description = description), grid = bs)
+	elif ll_mode == 'full':
+		data = DiagData(parallel_apply_enumerate(diag.hsym_ll_full, bs, (ll_max, h_sym, params), f_kwds = modelopts_bdep, fj_kwds = list_kwds, num_processes = num_processes, description = description), grid = bs)
+		# transitions and transitions_range are ignored
+	elif ll_mode == 'legacy':
+		# Renormalize total number of eigenvalues (per-LL-index count)
+		if int(np.ceil(modelopts_bdep['neig'] / ll_max)) < 6:
+			sys.stderr.write("Warning (lldiagonalization.hll): Requested number of eigenstates leads to < 6 eigenstates per LL index. Use minimum of 6 states per LL index instead.\n")
+		modelopts_bdep['neig'] = int(np.ceil(modelopts_bdep['neig'] / ll_max))
+		if params.lattice_transformed_by_matrix():
+			sys.stderr.write("ERROR (lldiagonalization.hll): Lattice transformation cannot be used in legacy mode.\n")
+			exit(1)
+		data = DiagData(parallel_apply_enumerate(diag.hz_ll, bs, (ll_max, params), f_kwds = modelopts_bdep, fj_kwds = list_kwds, num_processes = num_processes, description = description), grid = bs)
+	else:
+		raise ValueError("Invalid LL mode")
+
+	return data
+
+def hbulk_ll(
+		ll_mode, kbs, ll_max, h_sym, params, modelopts = {}, list_kwds = {},
+		description = None,	num_processes = 1):
+	"""Wrapper for bulk LL diagonalization
+
+	Arguments:
+	ll_mode        'legacy', 'sym', or 'full'. The LL mode.
+	bs             List/array of Vector instances or floats, or a VectorGrid
+	               instance. The magnetic field values.
+	ll_max         Integer. The largest LL index taken into account.
+	h_sym          SymbolicHamiltonian instance. The Hamiltonian as function of
+	               k+ and k-. This argument is ignored in ll_mode is 'legacy'.
+	params         PhysParams instance.
+	modelopts      Dict instance. The keywords being passed to diagonalization
+	               and Hamiltonian functions.
+	list_kwds      Dict instance. Keywords that have lists or arrays as values
+	               and are iterated over. That is, if list_kwds['key'] = arr,
+	               apply 'key' as a keyword with value arr[i] for the i'th
+	               point in the grid.
+	description    String. Status message for the progress monitor.
+	return_eivec   True, False or None. If True, keep eigenvector data in the
+	               return value (DiagDataPoint instance). If False, discard
+	               them. If None, discard them only if observables have been
+	               calculated.
+	num_processes  Integer. Number of processes used in parallelization.
+
+	Returns:
+	DiagData instance.
+	"""
+	## Defaults for arguments modelopts and list_kwds are not changed, hence safe
+
+	## Calculate LL dispersion
+	if ll_mode == 'sym':
+		modelopts['solver'] = EighSolver(num_processes, 1)
+		data = DiagData(parallel_apply_enumerate(diag.hsym_ll, kbs.b, (ll_max, h_sym, params), f_kwds = modelopts, fj_kwds = list_kwds, num_processes = num_processes, description = description), grid = kbs.b)
+	elif ll_mode == 'full':
+		modelopts['solver'] = EighSolver(num_processes, 1)  # Sparse solver would also work
+		data = DiagData(parallel_apply_enumerate(diag.hsym_ll_full, kbs.b, (ll_max, h_sym, params), f_kwds = modelopts, fj_kwds = list_kwds, num_processes = num_processes, description = description), grid = kbs.b)
+		# transitions and transitions_range are ignored
+	elif ll_mode == 'legacy':
+		if params.lattice_transformed_by_matrix():
+			sys.stderr.write("ERROR (lldiagonalization.hbulk_ll): Lattice transformation cannot be used in legacy mode.\n")
+			exit(1)
+		data = DiagData(parallel_apply_enumerate(diag.hbulk_ll, kbs, (ll_max, params), modelopts, num_processes = num_processes, description = description), grid = kbs.b)
+	else:
+		raise ValueError("Invalid LL mode")
+
+	return data
+
+def hbulk_ll0(params, modelopts = {}, description = None):
+	"""Wrapper for bulk LL diagonalization at zero momentum, zero magnetic field
+
+	Arguments:
+	params         PhysParams instance.
+	modelopts      Dict instance. The keywords being passed to diagonalization
+	               and Hamiltonian functions.
+	description    String. Status message for the progress monitor.
+
+	Returns:
+	DiagDataPoint instance.
+	"""
+	k0 = Vector(0.0, astype='z')
+	b0 = Vector(0.0, astype='z')
+
+	if description is not None:
+		sys.stderr.write(description.strip("\n") + "\n")
+
+	ddp = diag.hbulk((k0, b0), params, **modelopts)
+
+	if description is not None:
+		sys.stderr.write("1 / 1\n")
+
+	return ddp
diff --git a/kdotpy-v1.0.0/src/kdotpy/diagonalization/stitch.py b/kdotpy-v1.0.0/src/kdotpy/diagonalization/stitch.py
new file mode 100644
index 0000000000000000000000000000000000000000..108bccb1b65d60a35cdb5e58501ae7d694439693
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/diagonalization/stitch.py
@@ -0,0 +1,144 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
def align_energies(e1, e2, align_exp = 4):
	"""Find the best overlap alignment of two sorted energy arrays.

	This is a slightly simplified version of bandalign.base.align_energies();
	see there for more information. We do not import that function, because
	doing so would create a circular dependency (essentially between diagdata
	and bandalign) and because it is designed for a different purpose, so using
	it for stitching would be "Zweckentfremdung".

	Arguments:
	e1             Array of floats. First set of eigenvalues.
	e2             Array of floats. Second set of eigenvalues.
	align_exp      Float/integer. Exponent of the cost function that is
	               minimized over all relative shifts. Unlike in
	               bandalign.base.align_energies(), the value 'max' is not
	               allowed.

	Note:
	The arrays e1 and e2 must be sorted in ascending order; a ValueError is
	raised otherwise.

	Returns:
	l1, r1, l2, r2   Integers. Left and right indices of the overlapping
	                 regions: e1[l1:r1] overlaps with e2[l2:r2]. The
	                 non-overlapping regions are e1[:l1], e2[:l2] on the left
	                 and e1[r1:], e2[r2:] on the right. Of each of these pairs,
	                 at most one member has length > 0, i.e., l1 = 0 or l2 = 0,
	                 and n1 - r1 = 0 or n2 - r2 = 0.

	Examples:
	align_energies([4, 5], [0, 1, 2, 3, 4, 5, 6])  yields  (0, 2, 4, 6)
	align_energies([0, 1, 2, 3, 4, 5, 6], [4, 5])  yields  (4, 6, 0, 2)
	"""
	n1, n2 = len(e1), len(e2)
	if (n1 > 1 and np.any(np.diff(e1) < 0.0)) or (n2 > 1 and np.any(np.diff(e2) < 0.0)):
		raise ValueError("Input arrays must be sorted in ascending order")

	# Pad e2 with NaN on both sides, so that every relative shift of e1 can be
	# expressed as a length-n1 window into the padded array.
	pad = np.full(n1 - 1, np.nan)
	padded = np.concatenate((pad, e2, pad))
	diffs = np.array([padded[j:j + n1] - e1 for j in range(n1 + n2 - 1)])
	cost = np.nansum(np.abs(diffs) ** align_exp, axis=1)
	noverlap = np.count_nonzero(~np.isnan(diffs), axis=1)

	# Minimize the average cost per overlapping eigenvalue. Compared to
	# bandalign.base.align_energies(), this is equivalent to ndelta_weight = 0.
	shift = np.argmin(cost / noverlap) - (n1 - 1)
	l1, r1 = max(0, -shift), min(n1, n2 - shift)
	l2, r2 = max(0, shift), min(n2, n1 + shift)

	return l1, r1, l2, r2
+
def stitch(eival1, eival2, eivec1, eivec2, targetenergy1, targetenergy2, accuracy=0.01):
	"""Stitch two sets of eigenvalues and eigenvectors

	The two sets are aligned with align_energies(); in the overlapping region,
	the eigenvalues are combined by a weighted average and for each state the
	eigenvector of the more accurate set is kept.

	Arguments:
	eival1, eival2   Numpy arrays of dim 1. The two sets of eigenvalues, sorted
	                 in ascending order.
	eivec1, eivec2   Numpy arrays of dim 2. The two sets of eigenvectors. The
	                 shape of these arrays must be (N, len(eival1)) and
	                 (N, len(eival2)), respectively.
	targetenergy1, targetenergy2
	                 Float values. The targetenergy values used for
	                 diagonalization.
	accuracy         Float. Estimate of solver precision. Used to determine
	                 degeneracy of states.

	Raises:
	ValueError       If the overlapping region is too small, if it could be
	                 fully degenerate, or if the stitched eigenvalues are not
	                 monotonic.

	Returns:
	new_eival        Numpy array of dim 1. The stitched set of eigenvalues.
	new_eivec        Numpy array of dim 2. The stitched set of eigenvectors.
	"""
	l1, r1, l2, r2 = align_energies(eival1, eival2)
	left_e1, overlap_e1, right_e1 = eival1[:l1], eival1[l1:r1], eival1[r1:]
	left_e2, overlap_e2, right_e2 = eival2[:l2], eival2[l2:r2], eival2[r2:]

	# An overlap of fewer than two eigenvalues cannot be checked for degeneracy
	# (np.diff would be empty and np.amax would fail with an opaque "zero-size
	# array" error), nor can it be stitched reliably. Fail with a clear message.
	if r1 - l1 < 2 or r2 - l2 < 2:
		raise ValueError("Error while stitching solutions. Overlapping region is too small.")
	delta_e1, delta_e2 = np.amax(np.diff(overlap_e1)), np.amax(np.diff(overlap_e2))
	if delta_e1 < accuracy or delta_e2 < accuracy:
		raise ValueError("Error while stitching solutions. Overlapping eigenvalues could be fully degenerate.")

	# Get weighted average of eigenvalues in the overlapping region; values
	# closer to their diagonalization target are weighted more strongly.
	# NOTE(review): an overlap eigenvalue exactly equal to its target energy
	# yields an infinite weight and hence a NaN average; presumably the solver
	# never returns such an exact match -- confirm.
	diff_e1, diff_e2 = overlap_e1 - targetenergy1, overlap_e2 - targetenergy2
	weight_e1, weight_e2 = np.abs(1 / diff_e1), np.abs(1 / diff_e2)
	overlap_eivals = (overlap_e1 * weight_e1 + overlap_e2 * weight_e2) / (weight_e1 + weight_e2)

	# Compose eigenvalues and eigenvectors like
	# (left_values, overlap_values, right_values). Of each pair (left_e1,
	# left_e2) and (right_e1, right_e2), at most one is nonempty; see
	# align_energies().
	n1 = len(eival1)
	left_eivals = left_e1 if l1 > 0 else left_e2
	right_eivals = right_e1 if r1 < n1 else right_e2
	left_eivecs = eivec1[:, :l1] if l1 > 0 else eivec2[:, :l2]
	right_eivecs = eivec1[:, r1:] if r1 < n1 else eivec2[:, r2:]
	# For each overlapping state, keep the eigenvector of the set whose
	# eigenvalue lies closer to its diagonalization target.
	overlap_eivecs = np.where(weight_e1 > weight_e2, eivec1[:, l1:r1], eivec2[:, l2:r2])
	new_eival = np.concatenate((left_eivals, overlap_eivals, right_eivals))
	new_eivec = np.hstack((left_eivecs, overlap_eivecs, right_eivecs))

	# The next issue can occur when overlapping eigenvalues are highly
	# degenerate and the degeneracy is higher than the overlap. This should
	# have been covered by the heuristic above, but we test the validity
	# again to be on the safe side.
	# Example: Calculation of 30 Landau levels at zero magnetic field with
	# overlap of just 20 eigenvalues. It's not possible to stitch those
	# solutions correctly without further information.
	if np.any(np.diff(new_eival) < 0.0):
		raise ValueError("Error while stitching solutions. Eigenvalues are not monotonic.")
	return new_eival, new_eivec
diff --git a/kdotpy-v1.0.0/src/kdotpy/doc.py b/kdotpy-v1.0.0/src/kdotpy/doc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d31340605a0f53cd4125ea170869ff1fd25dc0f8
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/doc.py
@@ -0,0 +1,104 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import pkgutil
+import inspect
+import pydoc
+import sys
+import kdotpy
+
def search_submodule(submodule, target):
	"""Search functions and classes inside a submodule

	Returns a set of fully qualified names ('module.name' or
	'module.Class.method') of kdotpy functions and classes matching target
	(case-insensitively).
	"""
	found = set()
	needle = target.lower()
	for membername, member in inspect.getmembers(submodule):
		if membername.lower() == needle:
			# Direct match: record it if it is a kdotpy function or class
			if (inspect.isclass(member) or inspect.isfunction(member)) and member.__module__.startswith('kdotpy'):
				found.add("%s.%s" % (member.__module__, membername))
			continue
		# Non-matching kdotpy classes may still contain matching methods
		if not inspect.isclass(member) or not member.__module__.startswith('kdotpy'):
			continue
		for fn in search_class_functions(member, needle):
			found.add("%s.%s.%s" % (member.__module__, membername, fn))
	return found
+
def search_class_functions(cls, target):
	"""Search member functions inside a class

	Returns the set of member-function names of cls that match target
	(case-insensitively).

	Note: We currently do not consider nested classes, because there is no need
	for it.
	"""
	needle = target.lower()
	return {
		membername for membername, member in inspect.getmembers(cls)
		if membername.lower() == needle and inspect.isfunction(member)
	}
+
def scan_submodules(target):
	"""Scan all kdotpy submodules for name target"""
	# These module names are skipped, because importing them has side effects.
	skipped = ('kdotpy.__main__', 'kdotpy.testselfcon', 'kdotpy.testsymbolic')
	matches = set()
	for modinfo in pkgutil.walk_packages(kdotpy.__path__, kdotpy.__name__ + '.'):
		if modinfo.name in skipped or '-' in modinfo.name:
			continue
		matches |= search_submodule(pkgutil.resolve_name(modinfo.name), target)
	return matches
+
def doc(target):
	"""Search classes and functions for documentation for object target

	Arguments:
	target   String. Either a fully qualified name starting with 'kdotpy'
	         (documentation shown directly via pydoc) or a bare function/class
	         name that is searched for in all kdotpy submodules.

	Note:
	Exits with status 3 if target does not match any known function or class.
	If multiple matches are found, they are listed instead of documented.
	"""
	if target.startswith('kdotpy'):
		pydoc.doc(target)
		return
	# sorted() already returns a list; no need for an extra list() wrapper
	matches = sorted(scan_submodules(target))
	if len(matches) == 0:
		sys.stderr.write("ERROR (kdotpy): Not a known function or class\n")
		sys.exit(3)
	elif len(matches) == 1:
		pydoc.doc(matches[0])
	else:
		print("Multiple matching options:")
		for m in matches:
			print(m)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/docs/helpfile.txt b/kdotpy-v1.0.0/src/kdotpy/docs/helpfile.txt
new file mode 100644
index 0000000000000000000000000000000000000000..091953a43fc8ad88f6850f616e767d15b93b0c18
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/docs/helpfile.txt
@@ -0,0 +1,2321 @@
+Command line reference
+======================
+
+Note: In the following, # #1 #2 denote parameters, usually numerical, unless
+      stated otherwise. Bracketed arguments [#] are optional.
+Note: The options themselves are not case sensitive and underscores are ignored.
+      This is not necessarily valid for their (additional) parameters.
+
+
+Call signatures
+---------------
+
+The kdotpy application is actually a collection of several subprograms, each of
+which may be seen a separate python script.
+
+All subprograms are called from the main script main.py, that is invoked by the
+command kdotpy, provided that the kdotpy module has been installed correctly.
+The call signature is
+
+$ kdotpy xxx arg1 arg2 ...
+
+where xxx is replaced by the subprogram label. The available subprograms are:
+
+kdotpy 1d        For 1D dispersions, i.e., strip geometry.
+kdotpy 2d        For 2D dispersions, i.e., slab geometry.
+kdotpy ll        For 2D Landau level calculations.
+kdotpy bulk      For bulk (3D) dispersions.
+kdotpy bulk-ll   For Landau level calculations in 3D.
+kdotpy merge     For replotting and/or merging existing data files.
+kdotpy compare   For comparing data from two or more existing data files.
+kdotpy batch     For doing a batch run.
+kdotpy test      For running tests, useful for development purposes.
+kdotpy help      For showing help from this file.
+kdotpy doc       For showing development documentation.
+kdotpy version   For showing the version number.
+
+Alternatively, one may use
+
+$ python3 -m kdotpy xxx arg1 arg2 ...
+
+
+Definition of the layer stack
+-----------------------------
+lwell #         Thickness of the active layer (e.g., quantum well) in nm.
+                (Alias: lqw, qw)
+lbarr #1 [#2]   Thickness of the barrier layers in nm. If one thickness is
+                given, assume this value for both barrier layers (bottom and
+                top). If two thicknesses are given, the first and second
+                argument refer to the bottom and top layer, respectively. The
+                input 'lbarr #1 lbarr #2' is equivalent to 'lbarr #1 #2'.
+                (Alias: lbar, lbarrier, bar, barr, barrier)
+llayer #1 [#2 ...]
+                Thicknesses of the layers in nm. The number of layers may be
+                arbitrary, but the number of thicknesses must always be equal to
+                the number of materials. This argument may not be combined with
+                lwell and lbarr.
+                (Alias: llayers, layer, layers, thickness, thicknesses, thick)
+mwell #1 [#2]   Material for the well layer. See 'mlayer' on how to input a
+                material. (Alias: mqw)
+mbarr #1 [#2]   Material for the barrier layers. See 'mlayer' on how to input a
+                material. (Alias: mbarrier, mbar)
+mlayer #1 [#2 ...]
+                Material specification for an arbitrary number of layers. Each
+                material instance is a material id or compound (e.g., HgMnTe,
+                HgCdTe), optionally followed by extra numerical arguments that
+                define the composition. The composition can either be specified
+                as par of the compound (chemical formula) or as these extra
+                arguments. Fractions and percentages are both accepted. Thus,
+                all of the following are equivalent: HgMnTe 2%, HgMnTe 0.02,
+                HgMn0.02Te, Hg0.98Mn0.02Te, HgMn2%Te, HgMn_{0.02}Te, etc.
+                NOTE: The chemical formula is case sensitive. This eliminates
+                ambiguity. The number of specified materials must match the
+                number of thicknesses (llayer; lwell and lbarr).
+                (Alias: mater, material)
+msubst #1 [#2]  Material for the substrate. This only sets the lattice constant
+                which is used to calculate strain. If this argument is omitted,
+                the strain is taken from the strain or the alattice argument.
+                (Alias: msub, substrate, msubstrate)
+ltypes #1       Define the type (purpose of each layer). The argument must be a
+                string of the following letters whose length must match the
+                number of layers in the stack:
+                b       barrier
+                c       cap
+                d       doping
+                q OR w  well
+                s       spacer
+                (Alias: ltype, lstack)
+                NOTE: Some functions will work properly only if there is exactly
+                one 'well' layer.
+ldens #1 [#2 ...]
+                For each layer, the 'background density' of charge, for example
+                doping. There need to be as many values as there are layers. The
+                values are expressed in e/nm^2.
+                (Alias: layerdens, layerdensity)
+
+Other geometrical parameters
+----------------------------
+zres #          Resolution in the z direction in nm. (Alias: lres)
+width #         Width of the sample (in the y direction). If a single number is
+                given, this determines the width in nm. If the argument is given
+                as #1*#2 or #1 * #2, where #1 is an integer, then the sample has
+                #1 sites in the y direction spaced by a distance of #2 nm each.
+                If the argument is given as #1/#2 or #1 / #2, then the total
+                width is #1 and the resolution #2.
+                (Alias: W)
+yres #          Resolution in the y direction in nm. (Alias: wres)
+linterface #    Smoothing width of the interface in nm. (Alias: interface)
+periodicy       Enables periodic boundary conditions in the y direction (only
+                applies to 1d)
+stripangle #    Angle in degrees between the translationally invariant direction
+                of the strip/ribbon and the (100) lattice vector (kdotpy 1d
+                only). Default: 0
+                (Alias: ribbonangle)
+stripdir #      Direction of the translationally invariant direction of the
+                strip/ribbon in lattice coordinates. The argument may be a
+                lattice vector, e.g., 130 for (1,3,0) or any of x, y, xy, and
+                -xy (equivalent to 0, 90, 45, and -45 degrees). Only one
+                argument 'stripangle' or 'stripdir' should be given.
+                (Alias: ribbondir)
+orientation # [#] [#]
+                Orientation of the lattice. (Alias: orient)
+                Possible patterns (ang = angle, dir = direction triplet):
+                  ang          Rotation around z (like stripangle)
+                  ang ang      Tilt z axis, then rotate around z axis
+                  ang ang ang  Euler rotation z x z. Rotate around c axis, tilt
+                               z axis, rotate around z axis
+                  dir          Longitudinal direction x (like stripdir)
+                  - dir        Growth direction z
+                  dir dir      Longitudinal and growth direction x, z
+                  - dir dir    Transversal and growth direction y, z
+                  dir dir dir  Longitudinal, transversal, and growth direction
+                               x, y, z
+                  dir ang OR ang dir
+                               Growth direction z and rotation around z
+                Format for the inputs: For angles ('ang'), enter an explicit
+                floating point number containing a decimal sign (period '.').
+                Integer values (for example 45 degrees) can be entered as 45.,
+                45.0, 45d, or 45°. Direction triplets ('dir') are a triplet of
+                digits without separators and possibly with minus signs (e.g.,
+                100, 111, 11-2, -110) or numbers separated by commas without
+                spaces (e.g., 1,1,0 or 10,-10,3).
+NOTE: If the option 'orientation' is used, the program uses an alternative
+construction method for the Hamiltonian, which may cause the time consumption by
+this step to increase by a factor of approximately 4. There is no exception for
+trivial orientations, like 'orientation - 001', which still invokes the
+alternative construction method.
+
+
+Other physical parameters
+-------------------------
+External parameters:
+b # [...]       External magnetic field in T. Ranges are input as described in
+                the section 'Vectors and Ranges', see below.
+temp #          Temperature in K. The temperature affects the gap size (band
+                edges) and the Mn exchange coupling. Optionally, it sets the
+                thermal broadening of the density of states if the argument
+                'broadening thermal' (without value) is given.
+                NOTE: The gap dependence on temperature has been implemented
+                since version v0.81. For older versions, the values at T = 0 K
+                were used regardless of the temperature set by 'temp #'.
+
+Specification of strain:
+ignorestrain    Ignore the strain terms in the Hamiltonian. (Alias: nostrain)
+strain # [# #]  Set strain value. The value may be set as a number or percentage
+                (e.g., -0.002 or -0.2%). The value is interpreted as the
+                'relative strain' epsilon = (a_strained / a_unstrained) - 1,
+                where a_unstrained refers to the well material. (In layer stacks
+                with more than three layers, the well may not be identified, and
+                then this option cannot be used.) Setting 'strain none' is
+                equivalent to 'ignorestrain'. It is also possible to specify
+                more than one argument; then the values are interpreted as
+                epsilon_xx, epsilon_yy, and epsilon_zz. It is possible to enter
+                - for one or two values; then the strain values corresponding to
+                these components are determined from the other one(s). If
+                'strain' is used together with "ignorestrain", the latter has
+                priority, i.e., no strain is applied.
+                NOTE: It is no longer possible to specify a strain axis. One can
+                use 'strain' with two or three numerical arguments instead.
+alattice #1     Set the lattice constant of the strained materials.
+                (Alias: alatt, latticeconst)
+NOTE: Only one of the three options 'msubst', 'alattice', 'strain #value' may be
+used at once.
+
+Material parameters:
+matparam #      Modify the material parameters. The argument can either be a
+                materials file or a sequence of parameter=value pairs. For the
+                latter, multiple parameters must be separated by semicolons (;)
+                and must be preceded by the material identifier, like so:
+                matparam 'HgTe:gamma1=4.1;gamma2=0.7;CdTe:gamma1=1.6'
+                Spaces are ignored and the colon (:) after the material may be
+                replaced by period (.) or underscore (_). The argument must be
+                quoted in the shell if it contains spaces. The material need not
+                be repeated for subsequent parameters, so that in the example,
+                gamma2 refers to the material HgTe. The values may be Python
+                expressions, but restrictions apply (see the information for
+                material parameter files). Note that all expressions must
+                resolve to numerical values in order for kdotpy to run
+                successfully. Multiple matparam arguments will be processed in
+                order of appearance on the command line. (Alias: materialparam)
+
+Options affecting calculation
+-----------------------------
+Modelling:
+norb #          Number of orbitals in the Kane model. The argument can be either
+                6 or 8, which means exclusion or inclusion, respectively, of the
+                Gamma7 orbitals. (Alias: orbitals, orb)
+                Shorthand for 'norb 6': 6o, 6orb, 6orbital, 6band, sixband
+                Shorthand for 'norb 8': 8o, 8orb, 8orbital, 8band, eightband
+                NOTE: Omission of this input is not permitted.
+noren           Do not renormalize the parameters if using anything else than
+                then eight-orbital Kane model. (Alias: norenorm,
+                norenormalization, norenormalisation)
+lllegacy, llfull
+                Force Landau level mode to be 'legacy' or 'full'. By default,
+                the Landau level calculation uses either the symbolic mode 'sym'
+                if possible or the full mode if necessary. The legacy mode may
+                not be used if the full mode were required. By giving 'llfull',
+                one may also use the full mode if the automatically chosen 'sym'
+                mode does not give the desired results. Beware that full mode is
+                much heavier on resources. (kdotpy ll and kdotpy bulk-ll)
+llmax           Maximum Landau level index. This has to be an integer >= 0. If
+                omitted, 30 is used. Larger values yield a more complete result,
+                but require more computation time and memory. (Alias: nll)
+
+Symmetries, boundary conditions, degeneracy lifting, etc.:
+noax            Include non-axial terms, i.e., break the axial symmetry. (Alias:
+                noaxial, nonaxial)
+ax              Use axial symmetry, i.e., the axial approximation. This is the
+                default for Landau level mode (kdotpy ll and kdotpy bulk-ll).
+                For dispersions, either 'ax' or 'noax' is required.
+                (Alias: axial)
+split #         Splitting (in meV) to lift the degeneracies. (Choose it to be
+                small, e.g., 0.01.)
+splittype #     Type of degeneracy splitting. One of the following choices:
+                  automatic  Choose sgnjz if BIA is disabled, bia if BIA is
+                             enabled. (Alias: auto; default)
+                  sgnjz      Use the operator sgn(J_z). Despite the fact that
+                             this quantity is not a conserved quantum number, it
+                             works remarkably well. This is also the default for
+                             calculations without BIA.
+                  sgnjz0     Use the operator sgn(J_z) at k = 0 only. This type
+                             can be useful if the degeneracy is broken for
+                             nonzero k as a result of some other term, for
+                             example an electric field.
+                  isopz      Use isoparity 'isopz'. This observable
+                             distinguishes the two blocks and is a conserved
+                             quantum number for symmetric geometries in many
+                             circumstances. Sometimes gives cleaner results than
+                             sgnjz.
+                  isopzw     Use isoparity 'isopzw', applied to the well layer
+                             only. While 'isopz' is not a conserved quantity for
+                             asymmetric geometries (e.g., a well layer and two
+                             barriers with unequal thickness), 'isopzw' can
+                             remain almost conserved in that case. Due to
+                             incomplete confinement in the well region, the
+                             eigenvalues may deviate significantly from ±1.
+                  isopzs     Use isoparity 'isopzs', applied to a region 
+                             symmetric around the centre of the well layer.
+                             Like 'isopzw', the observable 'isopzs' is also an
+                             almost conserved quantity for asymmetric geometries
+                             and tends to have eigenvalues closer to ±1, because
+                             it generally takes into account the decaying wave
+                             function in (a part of) the barriers.
+                  bia        Modified form of sgnjz, that works better if bulk
+                             inversion asymmetry (BIA) is present.
+                  helical    Momentum dependent splitting, with the quantization
+                             axis along the momentum direction. Zero at k = 0.
+                  helical0   Same as helical, but with sgn(J_z) at k = 0.
+                  cross      Momentum dependent splitting, with the quantization
+                             axis perpendicular to the in-plane momentum
+                             direction, i.e., kx Sy - ky Sx. Zero at k = 0.
+                  cross0     Same as cross, but with sgn(J_z) at k = 0.
+bia             Include bulk inversion asymmetry. Note that combination of BIA
+                with 'split' may cause unwanted asymmetries, for example under
+                kz -> -kz. (For kdotpy 2d, kdotpy ll, and kdotpy bulk)
+ignoremagnxy    Ignore the in-plane components of the magnetic field in the
+                gauge field (i.e., the 'orbital field'). The in-plane components
+                still have an effect through the Zeeman and exchange couplings
+                even if this option is enabled. Enabling this option 'simulates'
+                the calculation before the in-plane orbital fields were
+                implemented, as of version v0.58 (kdotpy 1d) or v0.74
+                (kdotpy 2d), respectively. (Alias: ignoreorbxy, ignorebxy)
+gaugezero #     Set the y position where the magnetic gauge potential is zero.
+                The position coordinates are relative: -1.0 and +1.0 for the
+                bottom and top edges of the sample, 0.0 for the center
+                (default). (Alias: gauge0)
+yconfinement #  Set a confinement in the y direction; local potential on the
+                outermost sites, in meV. A large value (such as the default) 
+                suppresses the wave function at the edges, which effectively 
+                imposes Dirichlet boundary conditions (wave functions = 0). If
+                the value is set to zero, the boundary conditions are
+                effectively of Neumann type (derivative of wave functions = 0).
+                Default: 100000 (meV) (Alias: yconf, confinement)
+
+Diagonalization options:
+neig #          Number of eigenvalues and -states to be asked from the Lanczos
+                method. (Alias: neigs)
+targetenergy #  Energy (meV) at which the shift-and-invert Lanczos is targeted.
+                If multiple values are given, then apply Lanczos at each of
+                these energies (experimental feature). If large numbers of
+                eigenvalues are to be calculated (e.g., 500), it may be faster
+                to calculate multiple sets with a smaller number of eigenvalues
+                (e.g., 5 sets of 150). Note that the values need to be chosen
+                carefully. If there is no overlap between the intervals where
+                eigenvalues are found, the calculation is aborted. For smaller
+                numbers of eigenvalues, it is recommended to use a single value
+                for targetenergy. (Alias: e0)
+energyshift #   Shift energies afterwards by this amount (in meV). Other energy
+                values may still refer to the unshifted energies. This is an
+                experimental feature that should be used with care. In case one
+                intends to merge data (e.g., using kdotpy merge), then one
+                should avoid using this option for the individual runs.
+                Afterwards, this option may be used with kdotpy merge. (Alias:
+                eshift)
+zeroenergy      Try to align the charge-neutral gap with E = 0 meV. In
+                combination with 'energyshift', align at that energy instead of
+                0 meV. See also the warnings under 'energyshift #'.
+
+System options (calculation):
+cpus #          Number of parallel processes to be used. Note: The processes do
+                not share memory, so it should be chosen such that the total
+                memory requirement does not exceed the available memory. Can
+                also be set to value 'max', 'auto' or 'automatic', for using
+                all available cores; this is the default. For a single-core
+                run, 'cpus 1' must be given explicitly. (Alias: cpu, ncpu)
+threads #       Number of threads used per process in external libraries like
+                Intel MKL (PARDISO), FEAST, LU decomposition. (Defaults to 1 if
+                omitted; Alias: nthreads) 
+gpus #          Number of parallel workers using the GPU when running a CUDA
+                capable solver (defaults to 'cpus' if omitted. Alias: gpu,
+                ngpu).
+gpuselect #     NOT YET IMPLEMENTED: Select GPU(s) to be used on a multi-GPU
+                environment. Currently uses first CUDA capable device. Can be
+                controlled via CUDA environment variables.
+showetf         Show estimated completion time in the progress monitor. When
+                this option is omitted, the 'estimated time left' (ETL) is
+                shown. This option is particularly convenient for longer jobs.
+                Alias: monitoretf
+verbose         Show more information on screen (written to stdout). Useful for
+                debugging purposes.
+tempout         Create a timestamped subdirectory in the output directory.
+                After each step that updates a DiagDataPoint instance, it is
+                'pickled' (using Python library 'pickle') and saved to disk as
+                temporary binary file. This output can be loaded with the
+                'resume' argument. See also the notes for 'resume' right below.
+keepeivecs      Keep eigenvectors in memory for all DiagDataPoints and also for
+                temporary output files (see 'tempout').
+                Warning: This can drastically increase the RAM usage.
+resume #1 [#2]  Path to folder created by argument 'tempout' during a previous
+                script run. If a matching DiagDataPoint is found in this folder,
+                it is restored into RAM and already processed calculation steps
+                are skipped.
+                Optionally, an integer step index may be specified to overwrite
+                the step from which the process is resumed. This can be used,
+                e.g. to redo the postprocessing for each DiagDataPoint, if
+                eigenvectors have been saved (see 'keepeivecs').
+                Note: Some command line arguments may be changed between runs
+                (e.g. cpu/threads configuration) without affecting the validity
+                of older DDPs for new runs. Apart from matching k & B values,
+                there is no further automatic validation.
+                Note: These files should be used for temporary storage and
+                immediate re-use only. This is not a suitable data format for
+                long-time storage. Compatibility between different versions of
+                kdotpy is NOT guaranteed. For permanent storage of eigenvectors,
+                enable the configuration option diag_save_binary_ddp. See also
+                the security warnings for the pickle module at
+                https://docs.python.org/3/library/pickle.html.
+                Usage suggestions: Resuming a preemptively canceled job (e.g.,
+                due to walltime limit, out of resources, etc), testing/debugging
+                restartable from partial solutions.
+
+
+Options affecting output
+------------------------
+Observables:
+obs #           Use observable # for the colouring of the plot. It must be one
+                of the available observables in the data files. There is also an
+                option to plot the standard deviation by prepending sigma to the
+                observable. For example, sigmay will look for y and y2 in the
+                list of observables, and assign plot colours according to the
+                values of sqrt(<y2> - <y>^2).
+                There is a special case "orbitalrgb", which colours the states
+                with RGB colours determined by the (gamma6,gamma8l,gamma8h)
+                expectation values.
+                In kdotpy, using this option will leave only the markers to
+                distinguish the data sets; without it, distinct markers and
+                colours are used.
+obsrange [#] #  Minimum and maximum value of the observable that determines the
+                colour scale. If one value is given, it is the maximum and the
+                minimum is either 0.0 or minus the maximum, which is
+                determined by whether the standard scale is symmetric or not. If
+                this option is omitted, use the predefined setting for the
+                colour scale (recommended). (Alias: orange, colorrange,
+                colourrange)
+dimful          Use dimensionful observables. Some observables, for example z
+                and y, are dimensionless by default, and this option changes
+                them to observables with a dimension (for example length in
+                nm). This option affects output data (xml and csv) and graphics.
+                Alias: dimfull
+orbitalobs      Calculate the observables 'orbital[j]', that is the squared
+                overlaps of the eigenstates within orbital number j, where j
+                runs from 1 to norb (the number of orbitals). (For kdotpy 2d
+                only.) (Alias: orbitaloverlaps, orbobs, orboverlaps)
+llobs           Calculate the observables 'll[j]', that is the squared overlaps
+                of the eigenstates within Landau level j, where j runs from -2
+                to llmax (the largest LL index). This option is available for
+                kdotpy ll in full LL mode only. (Alias: lloverlaps)
+custominterfacelengthnm #
+                When given calculate additional interface (character)
+                observables, but within a custom length interval given by #
+                (integer value in nm).
+radians         Use radians for angular coordinate values. If omitted, use
+                degrees (default).
+
+Data and plot range:
+erange #1 #2    Energy range, minimum and maximum value in meV. The energy range
+                determines the vertical plot range in plots. It is also used as
+                range for density of states calculations. For Landau level
+                calculations, states outside the energy range are not saved in
+                the B dependence data file.
+xrange [#1] #2
+                Horizontal range to display in the plot. If just one value is
+                given, the range runs from 0 to the specified value.
+                DEPRECATED: From version v0.95 onwards, the optional third
+                argument '+' is ignored. Up to version v0.94, the extra '+' was
+                used to extend the horizontal range by 5% in both directions.
+                (Alias: krange, brange)
+dosrange [#1] #2
+                Plot range of integrated density plots. If just one value is
+                given, use [0, value] for densities and [-value, value] for
+                integrated densities. Omission means that the plot range is
+                determined automatically. If a density unit is given, e.g.,
+                'densityenm', the values are interpreted in the quantity being
+                plotted. Here, large numbers (> 1000) are interpreted as having
+                units of cm^-d and small numbers as nm^-d, where d is the
+                dimension. (Alias: densityrange, dosmax, densitymax)
+plotvar #       Plot against the given variable, instead of the default variable
+                (coordinate component).
+xoffset         Offsets the data points slightly in horizontal direction, so
+                that (almost) degenerate points can be resolved. The direction
+                (left or right) is determined by the sign of the requested
+                observable.
+
+Plot style:
+plotstyle #     Choose the plot style. (Alias: plotmode). The second argument is
+                one of the following plot styles.
+                  normal     Unconnected data points
+                  curves     Connect the data points horizontally, i.e., by band
+                             index. This option replaces the old 'join' option.
+                             (Alias: join)
+                  horizontal Group the data points 'horizontally', but plot them
+                             as separate data points.
+                  auto       Use 'curves' if possible; otherwise use 'normal'.
+                             (Alias: automatic)
+                  spin       Use different markers based on the 'jz' observable
+                             value. (NOTE: jz is the total angular momentum, not
+                             the actual 'proper' spin)
+                  spinxy, spinxz, spinyz
+                             Like the 'normal' plot, but add arrows to indicate
+                             the spin components (sx, sy), (sx, sz) or (sy, sz),
+                             respectively.
+                  spinxy1, spinxz1, spinyz1
+                             Like spinxy, spinxz, and spinyz, but rather plot
+                             directions (unit vectors) that indicate the spin
+                             direction in the given plane.
+                  berryxy, berryxz, berryyz, berryxy1, berryxz1, berryyz1
+                             Arrows indicating Berry curvature, analogous to the
+                             above spin arrow modes
+                  isopz      Use different markers based on the 'isopz'
+                             observable value.
+                Upon omission, the default value is 'auto'.
+spin            Indicate spin expectation value (up, down) with different plot
+                markers/symbols.
+
+Other plot elements, style, etc.:
+labels          Display band characters or band labels at k = 0 (B = 0 if the
+                horizontal axis is magnetic field) and Landau level indices,
+                if applicable. This option replaces 'char', which can still be
+                used as an alias.
+                Contrary to older versions, the band characters are always
+                computed, if applicable, not only if this option is given.
+                (Alias: plotlabels, char)
+title #         Assign plot title. One may use {var} to substitute the variable
+                named var. In order to find out which are the available variable
+                names (keys), use 'title ?' to get a list. The format syntax
+                follows Python's string format function, including the format
+                specification "Mini-Language"; see:
+                  https://docs.python.org/3/library/string.html#format-string-syntax
+                Here, only named variables can be specified. Positional ones,
+                like "{0}" or "{1}" are not permitted. Some special variable
+                names are:
+                  llayer(#)       For layer properties, append parenthesized
+                                  integer index (n), e.g., llayer(1), for the
+                                  property of the nth layer.
+                  b_x, b_y, b_z   Cartesian vector components
+                  b_phi, b_theta  Angular coordinates of a vector in degrees
+                  b_len, b_abs    Vector length (len and abs are aliases)
+                Alias: plottitle
+titlepos #      Position of the plot title. This may be any of the following:
+                  l, r, t, b;
+                  left, right, top, bottom, center;
+                  top-center, bottom-center;
+                  tl, tr, bl, br;
+                  top-left, top-right, bottom-left, bottom-right
+                  n, s, ne, nw, se, sw;
+                  north, south, north-east, north-west, south-east, south-west.
+                NOTE: left and right are synonyms for "top left" and "top right"
+                NOTE: Double words can be with hyphen (top-center), underscore
+                  (top_center), space ("top center"; quotes are usually needed)
+                  or be joined (topcenter).
+                NOTE: e, east, w, west are not legal values
+                Alias: plottitlepos, titleposition, plottitleposition
+legend          Include a legend in the plot. For coloured observables, this is
+                a colour bar plus the indication of the observable.
+                (Alias: filelegend)
+legend label # [label # ...]
+                If the argument 'legend' is directly followed by 'label'
+                followed by the label text, use this text in the legend instead
+                of the file names. The label text must be quoted on the command
+                line if it contains spaces. (For kdotpy compare only.)
+
+System options (output):
+out #           Determines the names of the output files. For example, if the
+                argument is "1", the program produces output1.xml, plot1.pdf,
+                etc. This option also uses variable substitution using Python's
+                string format function; see command 'plottitle' above. (Alias:
+                outfile, outid, outputid, outputname)
+outdir #        Name of the output directory. If the directory does not exist,
+                try to create it. If omitted, try to write to the subdirectory
+                "data" if it exists, otherwise in the current directory.
+                (Alias: dir, outputdir)
+
+Extras
+------
+Density of states, electrostatics, etc.:
+dos             Plot density of states and integrated density of states. The
+                Fermi energy and chemical potential are also indicated in the
+                plots and printed to stdout, if their calculation has been
+                successful. The range of validity is shown in red: In the shaded
+                regions, additional states (typically at larger momentum k) are
+                not taken into account in the present calculation and may cause
+                the actual DOS to be higher than indicated. The validity range
+                typically grows upon increasing the momentum range (argument k).
+                For kdotpy ll, 'dos' will generate equal-DOS contours and put
+                them in the LL plot. This is done either at a number of
+                predefined densities, or at the density given by 'cardens'. For
+                this script, also plot the total DOS, and the 'numeric DOS'
+                (roughly the number of filled LL).
+localdos        For kdotpy 2d, plot the 'local DOS', the momentum-dependent
+                density of states. For kdotpy ll, plot the equivalent quantity,
+                DOS depending on magnetic field. For kdotpy ll, additionally
+                plot the 'differential DOS', the integrated DOS differentiated
+                in the magnetic field direction.
+banddos         For kdotpy 2d and kdotpy bulk, output the DOS by band. One
+                obtains two csv files, for DOS and IDOS, respectively. Each
+                column represents one band. (alias: dosbyband)
+byblock         For kdotpy 2d and kdotpy ll, in combination with 'dos'. Give
+                density of states where all states are separated by isoparity
+                value (isopz = -1 or +1). Note: By nature, this function does
+                not take into account spectral asymmetry of the individual
+                blocks. (Alias: byisopz)
+densityz        For kdotpy ll, plot density as function of z at the Fermi level,
+                for all values of the magnetic field B. The output is a
+                multipage pdf file and a csv file with z and B values over the
+                rows and columns, respectively. The output is for one carrier
+                density only.
+selfcon [# [#]] Do a selfconsistent calculation of the electrostatic potential
+                ("selfconsistent Hartree"). This method solves the Poisson
+                equation iteratively, taking into account the occupied states in
+                the well. This option also provides plots of the density as
+                function of z and of the potential.
+                Two optional numerical arguments: maximum number of iterations
+                (default: 10) and accuracy in meV (default: 0.01)
+selfconweight # Use this fractional amount to calculate the new potential in
+                each iteration of the self-consistent Hartree method. This has
+                to be a number between 0 and 1. The default value is 0.9. It may
+                be set to a smaller value in case the iteration goes back and
+                forth between two configurations, without really converging. A
+                small number also slows down convergence, so the number of
+                iterations may need to be increased. (Alias: scweight, scw)
+vgate #         Replaced by option vtotal (alias v_outer; same functionality as
+                vgate formerly) and vwell (alias v_inner). (Alias: vtb, vg)
+vtotal #        Add a potential difference between top and bottom of the whole
+                layer stack. The value is in meV and may be positive as well as
+                negative. (Alias: v_outer, vouter)
+vwell #         Add a potential difference between top and bottom of the 'well
+                region'. The value is in meV and may be positive as well as
+                negative. (Alias: v_inner, vinner)
+vsurf # [# [#]] Add a surface/interface potential. The first argument is the
+                value of the potential at the interfaces (barrier-well) in meV.
+                The second parameter determines the distance (in nm) for which
+                the potential decreases to 0 (default: 2.0). If the latter
+                argument is q (alias: quadr, quadratic), then the potential has
+                a parabolic shape. Otherwise, the decrease to 0 is linear.
+                (Alias: vif)
+potential # [# ...]
+                Read potential from a file. The file must be in CSV format,
+                i.e., with commas between the data values. The columns must have
+                appropriate headings; only 'z' and 'potential' are read, whereas
+                other columns are ignored. If the z coordinates of the file do
+                not align with those of the current calculation, then values are
+                found by linear interpolation or extrapolation. If extrapolation
+                is performed, a warning is given.
+                The first argument must be a valid file name. The following
+                arguments may be further filenames, and each filename may be
+                followed by a number, interpreted as multiplier. For example,
+                'potential v1.csv -0.5 v2.csv' will yield the potential given by
+                V(z) = -0.5 * V1(z) + V2(z). Multiple arguments 'potential' are
+                also allowed; the results are added. Thus,
+                '... potential v1.csv -0.5 ... potential v2.csv ...'
+                is equivalent to the previous example.
+potentialbc #   Apply custom boundary conditions for solving Poisson's equation
+                in selfconsistent calculations. The argument must be a string,
+                which can be one of three different formats:
+                1. Input like python dict instance without any spaces:
+                   "{'v1':5,'z1':-10.,'v2':7,'z2':10.}"
+                   All boundary names must be given explicitly, the order is
+                   irrelevant.
+                2. Input single quantities as string separated with semicolon
+                   without any spaces:
+                   "v1=5;z1=-10.;v2=7;z2=10."
+                   All boundary names must be given explicitly, the order is
+                   irrelevant.
+                3. Input quantity pairs as string separated with semicolon
+                   without any spaces:
+                   Either explicit: 'v1[-10.]=5;v2[10.]=7'
+                   Or implicit: 'v[-10.]=5;v[10.]=7'
+                   When using the explicit format, the order is irrelevant. When
+                   using the implicit format there is an internal counter, which
+                   applies an index to the quantity name, thus, the order does
+                   matter.
+                Here, all given examples will result in the same boundary
+                condition dictionary: {'v1':5,'z1':-10.,'v2':7,'z2':10.}.
+                The z-values must be given as coordinate in nm, or as one of the
+                following labels:
+                  bottom      Bottom end of the layer stack
+                  bottom_if   Bottom interface of the "well" layer
+                  mid         Center of the "well" layer
+                  top_if      Top interface of the "well" layer
+                  top         Top end of the layer stack
+                If less than 4 key-value pairs are given, only the corresponding
+                values of the automatically determined boundary conditions
+                are overwritten. The ones that do not appear in the automatic
+                determined boundary conditions, are ignored. This also means,
+                you can decide to only overwrite the z-coordinates but keep the
+                automatic determined values for 'v1', 'v2', etc.
+                If two full boundary conditions are given (4 or 5 key-value
+                pairs), automatic boundary conditions are always fully
+                overwritten.
+                NOTE: A special case for the implicit type 3 input is 'v12'.
+                There you can use 'v[-10.,10.]=0;v[0.]=7' for example, which
+                will result in {'v12':0,'z1':-10.,'z2':10.,'v3':7,'z3':0.}.
+                Another special case for the same input type is the combination
+                of 'dv1' and 'v1' (or 'dv2' and 'v2'). Here you can use 
+                'dv[-10.]=0;v[-10.]=7' (or 'v[-10.]=7;dv[-10.]=0'); note the
+                same z-coordinates.
+                (Alias: potbc)
+cardens # [...] Carrier density in the well in units of e/nm^2. This value sets
+                the chemical potential, i.e., "filling" of the states in the
+                well. The sign is positive for electrons and negative for holes.
+                (Alias: carrdens, carrierdensity, ncarr, ncar, ncarrier)
+                In combination with 'kdotpy ll ... dos', specify the density at
+                which the equal-density contour should be drawn. (See 'dos'
+                above.)
+                This option may take a single number as well as a range. This is
+                a new feature that has not been fully implemented: Most
+                functions involving carrier density will just take the first
+                value of the range.
+ndepletion # [#]   Density of the depletion layer(s) in the barrier, in units of
+                   e/nm^2. The sign is positive for holes and negative for
+                   electrons. The whole sample is neutral if the arguments
+                   'cardens' and 'ndepletion' come with the same value. If one
+                   value is specified, the charge is equally divided between
+                   top and bottom barrier. If two values are specified, they
+                   refer to bottom and top layer, consecutively.
+                   (Alias: ndepl, ndep)
+ldepletion # [#]   Length (thickness) of the depletion layers in nm. The values
+                   may be numbers > 0 or "inf" or "-" for infinite (which means
+                   zero charge volume density). The numbers refer to the bottom
+                   and top barrier, respectively. If a single value is given,
+                   use the same value for both bottom and top barrier. The
+                   default (if the argument is omitted) is infinity.
+                   (Alias: ldepl, ldep)
+efield # #      Electric field at the bottom and top of the sample in mV/nm.
+                Alternatively, one may enter a single value for either the top
+                or the bottom electric field:
+                  efield -- #, efield top #, efield t #, efield # top, ... (top)
+                  efield # --, efield btm #, efield b #, efield # btm, ... (btm)
+                If the variant with two values is used, the carrier density is
+                calculated automatically. In that case, the explicit input of
+                the carrier density (option 'cardens') is not permitted.
+                NOTE: 'efield 0 #' is not the same as 'efield -- #'.
+                NOTE: A positive electric field at the top boundary corresponds
+                to a negative gate voltage, and vice versa.
+berry           Calculate and plot the Berry curvature for the states close to
+                the neutral gap. Also plot the integrated Berry curvature as
+                function of energy. If combined with the option 'dos', then also
+                plot the integrated Berry curvature as function of density. For
+                LL mode (kdotpy ll), the Berry curvature is implicitly
+                integrated, and the resulting output values are the Chern
+                numbers of the eigenstates instead.
+                (Alias: chern (for kdotpy ll only))
+broadening [#] [#] [#]
+                Broadening parameter for the density of the eigenstates (for
+                kdotpy ll: Landau levels). The broadening is determined by a
+                numerical width parameter which may be supplemented by
+                additional parameters, the broadening shape, the scaling
+                function, and a parameter for the Berry broadening (the latter
+                for kdotpy ll only).
+                The broadening types are:
+                  thermal  Fermi distribution, width parameter is temperature;
+                           if the width parameter is omitted, use the
+                           temperature set by 'temp #'.
+                  fermi    Fermi distribution, width parameter is energy
+                           (alias: logistic, sech)
+                  gauss    Gaussian distribution, width parameter is energy
+                           (alias: gaussian, normal)
+                  lorentz  Lorentzian distribution, width parameter is energy
+                           (alias: lorentzian)
+                  step     Dirac-delta or Heaviside-step function; if width
+                           parameter is given, it is ignored (alias: delta)
+                If omitted, use the default 'auto' which selects 'thermal' for
+                dispersion mode and 'gauss' for LL mode.
+                The scaling function determines how the width w scales as
+                function of x (momentum k in nm^-1 or field B in T) (w1 is the
+                input width parameter):
+                  auto   Use 'const' for dispersion mode and 'sqrt' for LL mode
+                         (alias: automatic)
+                  const  Use constant width, w = w1
+                  lin    The width scales as w = w1 x (alias: linear)
+                  sqrt   The width scales as w = w1 sqrt(x)
+                  cbrt   The width scales as w = w1 x^1/3
+                  ^n     Where n is a number (integer, float, or fraction like
+                         '1/2'). The width scales as w = w1 x^n.
+                The final optional value (kdotpy ll only) is numeric (floating
+                point number or a percentage like '10%') that defines a
+                different broadening width for the Berry curvature/Hall
+                conductivity. Floating point input is interpreted as the
+                broadening width itself, a percentage defines this broadening
+                width as percentage of the density broadening width. The
+                Berry/Hall broadening inherits the shape and scaling function 
+                from the density broadening.
+                Multiple 'broadening' arguments may be given; these will then
+                be iteratively applied to the (integrated) DOS, in the given
+                order. NOTE: Due to limitations of the numerical integration
+                (convolution operation), combining multiple broadening functions
+                may lead to larger numerical errors than a single broadening
+                function. The convolution operation is commutative only up to
+                numerical errors, so changing the order may lead to slight
+                differences in the result.
+berrybroadening [#] [#] [#]
+                Broadening parameter for the Berry curvature/Hall conductivity.
+                The syntax is the same as the ordinary broadening parameter.
+                Also multiple ones can be combined. Note that it is not
+                permitted to combine 'berrybroadening' with a 'broadening'
+                argument with an extra numerical argument, for example,
+                'broadening 0.5 gauss 10%'. For kdotpy ll only. (Alias:
+                hallbroadening, chernbroadening)
+dostemp #       Temperature used for thermal broadening of the DOS. This input
+                is equivalent to 'broadening # thermal const' (but only one of
+                these may be used at a time). This temperature may be different
+                than the temperature set by 'temp' on the command line (which
+                controls the temperature in the exchange coupling, for example).
+                If neither 'dostemp' nor 'broadening' is given, no broadening is
+                applied. (NOTE: This behaviour has changed since v0.85.)
+                This option is especially useful for calculating the DOS with
+                the re-plotting scripts: In that case, 'temp' has no effect,
+                because the value is read from the data files, whereas 'dostemp'
+                *can* be used to set the thermal broadening.
+                (Alias: tbroadening, tempbroadening)
+hall            Shortcut that activates all options for calculation of Hall
+                conductivity with kdotpy ll. It is equivalent to the
+                combination 'berry dos localdos broadening 0.5 10%'. The default
+                value of the broadening can be overridden with the explicit
+                option 'broadening # [#]' combined with 'hall'. This applies to
+                berrybroadening as well.
+ecnp #          Set charge-neutral energy (zero density) to this energy. The
+                value determines the point where the density is zero. This
+                affects integrated density of states in dispersion mode only.
+                In order to manipulate band indices by determining the zero gap,
+                use the 'bandalign' argument. (Alias: cnp, efermi, ef0)
+densoffset #    Set a density offset. Basically, this number is added to the
+                integrated DOS / carrier density in the selfconsistent
+                calculation. The value is in units of charge density and can be
+                interpreted as free carriers inside the quantum well. (Alias:
+                noffset, ncnp)
+cardensbg #     Set a background density. Calculates a rectangular carrier
+                distribution for this number, which is then added to the carrier
+                distribution used in solving Poisson's equation. The value is in
+                units of charge density and can be interpreted as immobile
+                background charge.
+idosoffset #    Set an offset to the density of states, in appropriate DOS
+                units. This option is identical to 'densoffset' up to a factor
+                of 4 pi^2. (Alias: dosoffset)
+NOTE: Most of these options are currently available with kdotpy 2d only. Some
+can be used in kdotpy ll, which is explicitly indicated above.
+
+Other extras:
+plotfz [#]      See comments "Plotting z-dependent values" below. (Alias:
+                plotqz)
+plotwf [# ...]  Plot wave functions. The extra arguments are the plot style for
+                the wave function plot and the locations (momenta) for which the
+                plots are made. See also comments "Plotting wave functions"
+                below.
+overlaps        Calculate overlaps between the eigenstates with those at zero
+                momentum. By default, the overlaps are calculated with E1+/-,
+                H1+/-, H2+/-, and L1+/-. A nice visualization can be obtained
+                with 'obs subbandrgb', which assigns colours depending on the
+                overlaps with E1, H1, and H2. A visualization with different
+                bands can be obtained by using 'obs subbandh1e1e2', for example,
+                where the observable id ends with >= 3 pairs of subband
+                identifiers. Each subband identifier is a band character ('e',
+                'l', or 'h' followed by a number) denoting a pair of subbands,
+                a single subband (the previous followed by '+' or '-'), or a
+                band index (a signed integer preceded by 'b' or parenthesized,
+                e.g., 'b+2', '(-25)').
+                (kdotpy 2d and kdotpy ll)
+transitions [#1 #2] [#3]
+                Calculate and plot transitions between levels. There can be up
+                to 3 optional numerical arguments: The first pair is the energy
+                range where transitions are calculated. If omitted, calculate
+                transitions between all calculated states (which may be
+                controlled with 'neig' and 'targetenergy'). The last argument is
+                the square-amplitude threshold above which the transitions are
+                taken into account. If omitted, the program uses the default
+                value 0.05.
+minmax          Output the minimum, maximum, and zero-momentum energy of each
+                subband. (kdotpy 2d and kdotpy bulk)
+extrema         Output the local extrema of each subband. The output contains
+                the type (min or max), the momentum, the energy, and an estimate
+                for the effective inertial mass along the momentum direction.
+                (kdotpy 2d and kdotpy bulk; alias: localminmax, minmaxlocal)
+bhz [# ...]     Do a Löwdin expansion around zero momentum in order to derive a
+                simplified Hamiltonian in the subband basis. This is the
+                generalization of the BHZ model. The arguments specify the
+                number of subbands in the basis ('A bands') and (optionally) how
+                many bands should be treated perturbatively. The argument
+                pattern is one of:
+                  #na                  Number of A bands
+                  #na_b #na_a          Number of A bands below and above CNP
+                  #nl #na #nu          Number of lower perturbative bands,
+                                       A bands, and upper perturbative bands
+                  #nl #na_a #na_b #nu  Number of lower perturbative bands,
+                                       A bands below and above CNP, and upper
+                                       perturbative bands
+                  #a_labels            Labels of A bands, e.g., E1, H1+, H1-.
+                  #nl #a_labels #nu    Number of lower perturbative bands,
+                                       labels of A bands, and number of upper
+                                       perturbative bands.
+                If the number of perturbative bands is omitted, take the maximum
+                possible number of bands that are localized in the well.
+                The #a_labels argument may contain subband characters with or
+                without sign. Band indices with explicit sign (e.g., -1, +2) may
+                also be used.
+                If 'bhz' is given without arguments, 'bhz 2 2' is implied. This
+                is the standard four-band BHZ model.
+kbhz #          Set the reference momentum for the BHZ (Löwdin) expansion. The
+                argument refers to a momentum value on the kx axis. This
+                experimental option can be used for expansions around nonzero
+                momentum. Please consider the results with care, as they are not
+                always meaningful. (Alias: bhzk, bhzat)
+symmetrytest    Analyze the symmetries of the eigenvalues and observables under
+                various transformations in momentum space. This results in a
+                list of compatible representations of the maximal point group
+                Oh, from which the program tries to determine the actual
+                symmetry group (point group at the Gamma point).
+                NOTE: For a reliable result, the momentum grid must be
+                compatible with the symmetries; a cartesian grid should be used
+                for cubic symmetry, a polar or cylindrical grid otherwise.
+                For kdotpy 2d, kdotpy bulk: full analysis.
+                For kdotpy 1d, kdotpy merge: partial analysis (full analysis
+                to be implemented).
+symmetrize      Extend the data in the momentum space by symmetrization. For
+                example, a 1D range for positive k can be extended to negative
+                k, or a 2D range defined in the first quadrant can be extended
+                to four quadrants. The extension is done by taking the known
+                eigenvalues and observables and transforming them appropriately.
+                NOTE: The algorithm relies on some pre-defined transformation
+                properties of the observables, and should be used with care. A
+                cross-check with a symmetric range and 'symmetrytest' is
+                advised.
+                (kdotpy 1d, kdotpy 2d, kdotpy bulk, and kdotpy merge)
+
+Deprecated and removed options
+------------------------------
+
+Removed functionality (no longer works):
+densitycm, densitynm, densitypcm, densitypnm, densityecm, densityenm,
+densityunit, densunit, dunit 
+                Set the quantity and unit in which to plot density of states.
+                DEPRECATED: Use configuration values 'dos_quantity' and
+                'dos_unit'.
+e1shift # [#]   Apply an artificial shift to the E1 bands. DEPRECATED: No longer
+                works.
+ignoreexchange  Ignore the contributions from the paramagnetic exchange
+                coupling. DEPRECATED: In order to disable exchange coupling, 
+                adjust the relevant material parameters.
+nolatticereg    If specified, disable the transformation k --> sin(k).
+                DEPRECATED: No longer works. Use configuration value
+                'lattice_regularization=false' (default value).
+latticereg      If specified, force lattice regularization k --> sin(k).
+                DEPRECATED: No longer works. Use configuration value
+                'lattice_regularization=true'.
+plotrc #        Specify the location of the matplotlibrc file. (Alias: rcfile)
+                DEPRECATED: Plot customization is now done by setting the
+                configuration value 'fig_matplotlib_style'.
+yMn #           Mn concentration in the 2DEG layer (HgMnTe). The number may be a
+                fraction or a percentage. (Alias: Mn)
+                DEPRECATED: Use "mwell HgMnTe #"
+yCd #           Cd concentration in the barrier layers (HgCdTe). The number may
+                be a fraction or a percentage. (Alias: Cd)
+                DEPRECATED: Use "mbarr HgCdTe #"
+
+Vectors and ranges
+------------------
+The following 'patterns' apply to the input of the vector-valued quantities 'k'
+(momentum) as well as 'b' (magnetic field). The examples are given in terms of
+'k', but extend also to the case 'b'.
+
+The following combinations of components can be input:
+k               Vectors along default axis (kx for k, bz for b), value is length
+kx              Vectors along x axis
+ky              Vectors along y axis
+kz              Vectors along z axis
+kx ky           Vectors with coordinates (x, y) in a cartesian 2D grid
+kx ky kz        Vectors with coordinates (x, y, z) in a cartesian 3D grid
+k kphi          Vectors in a polar representation
+k kphi z        Vectors in cylindrical coordinates
+k ktheta kphi   Vectors in spherical coordinates (theta is polar angle, phi is
+                azimuth angle)
+k kdir          Vectors along a constant vector (parametrized as spherical and
+                polar coordinates in 3D and 2D, respectively)
+k kperp         Vectors in two perpendicular directions (first coordinate
+                generally refers to the default axis)
+k kperp kphi    Vectors in two perpendicular directions, then rotated by an
+                angle phi (in xy plane) (POSSIBLY IN THE FUTURE)
+NOTE: The polar and cylindrical coordinates are parametrized as
+  (x, y) = (r cos phi, r sin phi)
+and
+  (x, y, z) = (r cos phi, r sin phi, z),
+respectively.  The spherical coordinates are parametrized as
+  (x, y, z) = (r sin theta cos phi, r sin theta sin phi, r cos theta).
+Angles are given in degrees, unless the argument 'radians' is included.
+Negative radii are allowed, which is useful if one calculates along one axis
+with 'k -0.2 0.2 / 10 kphi 45', for example.
+NOTE: The order of the arguments is irrelevant. If doing a 2D or 3D range, then
+the coordinates are iterated through in a 'natural' order, e.g., first x, then
+y, or r, then theta, then phi.
+NOTE: For polar, cylindrical and spherical coordinates, one may input a single
+value or a one-dimensional range using a cartesian component combined with the
+angle(s). Examples: kx kphi, kx kphi ktheta, kz ktheta. This yields vectors at
+the given angle where the radius is scaled such that the given cartesian
+component has the given value(s). The angle must be single-valued, and singular
+angles lead to an error. The values are stored in the angular representation,
+and in order to get a plot in a cartesian component, use the option 'plotvar'.
+NOTE: The argument kdir is special in the sense that it does not define a
+range, but a constant vector. Its argument must always be a triplet of
+integers, for example 'kdir 1 1 0' or 'kdir 110'. The latter short version can
+be used if all components are between -9 and 9. There are several restrictions,
+e.g., in 2D mode one cannot have a nonzero z component. The resulting
+coordinates are parametrized in spherical (3D) or polar (2D) representation.
+
+Specification of values and ranges:
+k #             Single value
+k #1*#2         Single value #1 * #2, where #1 is an integer, and #2 could be
+                seen as a step size
+k #1/#2     (a) If #1 float, #2 float: #1 is maximum, #2 is step size.
+                  k = 0, 1*#2, 2*#2, ..., #1
+            (b) If #1 float, #2 int  : #1 is maximum, #2 is number of steps.
+                  k = 0, (1/#2)*#1, (2/#2)*#1, ..., #1.
+k #1 #2/#3  (a) If #1 float, #2 float, #3 float: #1 is minimum, #2 is maximum,
+                #3 is step size
+                  k = #1, #1 + 1*#3, #1 + 2*#3, ..., #2.
+            (b) If #1 float, #2 float, #3 integer: #1 is minimum, #2 is maximum,
+                #3 is number of steps
+                  k = #1, #1 + (1/#3)*#2, #1 + (2/#3)*#2, ..., #2.
+k #1 #2 #3/#4   Single momentum value "#3 of #4 steps from #1 to #2". #3 and #4
+                are integers.
+                  k = #1 + (#2 - #1) * #3 / #4
+NOTE: For the cases where a single slash is followed by an integer, doubling the
+slash will do 'quadratic stepping'. For example: "k 0 1 // 10" yields the
+momentum values 0.0, 0.01, 0.04, 0.09, ..., 0.81, 1.0. This option is especially
+useful with the variable 'b' in Landau-level calculations.
+
+Some examples:
+k -0.2 0.2 / 10              Values along the kx axis (default axis for k)
+kx 0.1 ky 0.1                Single (kx, ky). (Alias: k # kperp #)
+k 0.2 kphi 45                Single k value (k cos(phi), k sin(phi)).
+kx -0.2 0.2 / 10 ky 0.1      Range in kx direction, for single ky value.
+k -0.2 0.2 / 10 kperp 0.1    Equivalent to previous example
+k -0.2 0.2 / 10 kphi 45      Range of (k cos(phi), k sin(phi)) with a range of
+                             radii k.
+kx -0.2 0.2 / 10 kphi 45     Range of (k cos(phi), k sin(phi)) such that the kx
+                             values run from -0.2 to 0.2. Only a single value
+                             for phi is allowed. Singular values (e.g., 90) are
+                             forbidden.
+b 0 1 / 10                   Values along the bz axis (default axis for b),
+                             equally spaced, i.e., 0.0, 0.1, 0.2, 0.3, ... 0.9,
+                             1.0.
+b 0 1 // 10                  Values along the bz axis, quadratically spaced,
+                             i.e., 0.0, 0.01, 0.04, 0.09, ... 0.81, 1.0
+kx 0 0.2 / 10 ky 0 0.2 / 10  2D scan of (kx, ky)
+k 0 0.2 / 10 kphi 0 90 / 6   2D scan over radii k and angles phi
+b 0 0.2 / 10 btheta 60 bphi 90
+                             Vectors with a range of lengths at spherical angles
+                             theta = 60 degrees and phi = 90 degrees.
+kx 0 0.2 / 10 ky 0 0.2 / 10 kz 0 0.2 / 10
+                             3D scan over kx, ky, kz
+k 0 0.2 / 10 ktheta 0 90 / 6 kphi 0 90 / 12
+                             3D scan over spherical coordinates
+k 0 0.2 / 10 kdir 1 1 1      Range of values parallel to vector (1,1,1)
+k 0 0.2 / 10 kdir 111        Range of values parallel to vector (1,1,1)
+k -0.2 0.2 / 10 kperp 0.2 kphi 45
+                             Range of (k cos(phi), k sin(phi)) + (-kperp
+                             sin(phi), kperp cos(phi)) with a range of values k.
+                             (POSSIBLY IN THE FUTURE)
+
+Extras in detail
+================
+
+Plotting wave functions
+-----------------------
+The band structure scripts (kdotpy 1d, kdotpy 2d, etc.) have the option to plot
+the wave functions together with the other (dispersion/dependence) plots. This
+is done with the argument 'plotwf', which has some additional options that
+determine the plot style and the locations (momenta or magnetic fields) at which
+the plots are made. There can be only one plot style argument, but multiple
+location arguments. The extra arguments can be omitted, in which case the
+default plot style and locations are used.
+
+Plot styles (1D):
+z      As function of z, at y = 0 (at the middle of the sample), i.e.,
+       psi(z, 0). Decomposition into orbitals. (Alias: 1d)
+y      Absolute-squared, as function of y, integrated over the z coordinate,
+       i.e., |psi|^2(y) = int |psi(z, y)|^2 dz. Decomposition into orbitals
+       and optionally subbands.
+zy     Absolute-squared, as function of (z, y), i.e., |psi(z, y)|^2, summed over
+       all orbitals. (Alias: yz, default)
+color  Absolute-squared, with different colours for the orbitals, as function of
+       (z, y). (Alias: colour, byband, by_band)
+
+Plot styles (2D, LL):
+separate  As function of z, with different curves for each orbital. (Alias: all,
+          default)
+together  Absolute-squared, as function of z, together in one plot.
+
+Locations (momentum values k [or B in LL mode]):
+zero      At k = 0. (Alias: 0)
+min       At the smallest k.
+max       At the largest k.
+mid       The k value in the center of the momentum range.
+minmax    At the smallest and the largest k. (Equivalent to 'min max')
+all       Plot wavefunctions for all k. Warning: This can lead to very large
+          output and long postprocessing times.
+three     At the smallest, largest, and central k value. (Equivalent to
+          'min mid max'; alias: 3, halves)
+five      Like three, but in addition at the quarters, i.e., 1/4 and 3/4 along
+          the momentum range. (Alias: 5, quarters)
+#         (any number other than '0', '3', '5') At this value.
+
+Sets of multiple momentum values take effect in 2D only. In 1D, take the first
+or the smallest value.
+If calculating over a multi-dimensional momentum grid, the locations apply to
+each component separately. Any value that would not be in the range (e.g., 'mid'
+in a range with an even number of values) is skipped. It is not possible to
+specify different values for different dimensions, although lengths and angles
+are easily separated by their different orders of magnitude, for example:
+'plotwf 0 0.1 45' would match polar momentum values (0, 0 deg), (0, 45 deg),
+(0.1, 0 deg), and (0.1, 45 deg).
+In LL mode, the locations are interpreted in terms of magnetic fields.
+Depending on the configuration value 'wf_location_exact_match', match the
+locations exactly (if true; default value) and skip the locations if no match is
+found, or find the nearest value (if false).
+If symmetrization is used (command line argument 'symmetrize'), then wave
+function output happens only at the points of the original grid, i.e., where
+the diagonalization has been done explicitly. At extrapolated points not present
+in the original grid, no output will be given.
+
+Additional comments:
+If erange is set, plot the wave functions in this energy range only.
+
+For plot styles 'zy' and 'color', the outputs will be files with names
+wf_xxmeV_yyy.png or wf_xxmeV_yyy.pdf. If the 'convert' command is present, a
+multipage PDF is created. The separate files will not be deleted. It may be
+advisable to clean up files from earlier runs first.
+
+For the other plot styles, the result will be a single PDF file. This will be a
+multipage PDF if multiple plots are concerned. In the rare case that the
+creation of a multipage PDF page is not possible, multiple separate PDFs are
+made.
+
+
+Plotting z-dependent values
+---------------------------
+For the 1d and 2d scripts, the option 'plotfz' outputs plots of some z-dependent
+parameters that determine the Hamiltonian. The values are also written to a CSV
+format data file.
+
+The plots contain:
+qz-bands.pdf       E_c and E_v               (conduction and valence band edges)
+qz-fgammakappa.pdf F, gamma_{1,2,3}, kappa   (miscellaneous band parameters)
+qz-exchange.pdf    y N_0 alpha, y N_0 beta   (paramagnetic exchange energies)
+
+The option 'legend' will include a legend in these plots (recommended). The
+filenames are also affected by the option 'out'.
+
+
+BHZ calculation
+---------------
+Approximations to the BHZ model are done by invoking kdotpy 2d with the extra
+option 'bhz'.
+
+The number of bands in the perturbation theory is input using the argument
+'bhz', which may take up to four even-integer parameters:
+1. kdotpy 2d [...] bhz #na
+2. kdotpy 2d [...] bhz #na_b #na_a
+3. kdotpy 2d [...] bhz #nl #na #nu
+4. kdotpy 2d [...] bhz #nl #na_b #na_a #nu
+5. kdotpy 2d [...] bhz #a_labels
+6. kdotpy 2d [...] bhz #nl #a_labels #nu
+Here, #na determines the number of 'A' bands, i.e., the bands which are treated
+exactly at k = 0. With #na_b and #na_a one can specify the number of A bands
+below and above the (charge-neutral) gap. The parameters #nl and #nu determine
+the number of 'lower' and 'upper' bands that are taken into account
+perturbatively. If not given (cases 1., 2., 5.), then take the maximum possible
+number of bands that are localized in the well. In the other cases, when
+non-localized bands are included, a warning is issued, but the calculation will
+continue (if no other errors occur).
+The A bands may also be given as a sequence of labels #a_labels (cases 5. and
+6.). For example, 'bhz 6 E1 H1 2' is the usual four-band BHZ model with 6 and 2
+bands taken into account perturbatively below and above. In this case, the order
+of the basis in the output is the order of bands given as input. It is possible
+to distinguish between the two spin components (e.g., by using input 'bhz 6 E1+
+E1- H1+ H1- 2'), but this is not necessary. A band label without + or - implies
+the two respective components.
+For the A bands, one may also use band indices instead of or in combination with
+band labels. Band indices are input as integers with signs, for example
+'bhz 6 -2 -1 +1 +2 2'. The explicit + sign for positive band indices is
+required. This option may be useful if band characters are not available, for
+example at finite magnetic field.
+
+The default value is 'bhz 2 2', i.e., the standard four-band BHZ model. Another
+typical setting is 'neig 50 bhz E1 H1 H2' for a six-band model.
+
+There is an experimental option 'kbhz #' (alias 'bhzk', 'bhzat') for doing a
+BHZ-like calculation (Löwdin perturbation) around nonzero momentum. Presently,
+only points on the x axis are supported. The output needs to be considered with
+care. Please test and report.
+
+
+kdotpy merge and kdotpy compare
+===============================
+
+The separate command kdotpy merge provides the ability to create one plot from
+data points scattered over different files.
+The command kdotpy compare takes multiple files and plots the data in one plot,
+distinguishing between the data sets using different markers and/or colours.
+
+They are called as:
+$ kdotpy merge option1 arg1 option2 arg2 arg3 file1 file2 ...
+$ kdotpy compare option1 arg1 option2 arg2 arg3 file1 file2 ...
+where file1, file2, ... are the data (xml) files.
+Note: In case any of the options or arguments conflict with existing file or
+directory names, one can put '--' in front of the file names, like:
+$ kdotpy merge option1 arg1 option2 arg2 arg3 -- file1 file2 ...
+The '--' signals that following arguments may be read as file names and
+preceding arguments should not.
+
+The additional options are:
+sortdata        Sort the data by momentum (k) or parameter (B) value. This will
+                succeed only if the data set is on a grid.
+select #1 #2    Select a subset of the grid. The first argument #1 must be a
+                momentum component (e.g., k, kx, kphi) and the second argument
+                #2 must be a numerical value. This selects all data points for
+                which the momentum component is equal to #2.
+bandalign [# [#]]
+                Try to (re)connect the data points, by (re)assigning the band
+                indices. The first optional argument determines the 'anchor
+                energy', i.e., the energy at k = 0 (or B = 0) that separates
+                bands with positive and negative indices. If the second argument
+                is given, treat the gap at the given energy as having this
+                index.  Omission of the first argument causes the anchor energy
+                to be determined automatically. Explicit specification of this
+                energy is necessary only if the automatic method appears to
+                fail, or if the correct assignment of the band indices is
+                important (e.g., for calculating the density of states). When
+                the second argument is omitted, use the default gap index 0.
+                Alternatively, 'bandalign filename.csv' may be used to use the
+                energies in the csv file in order to assign the band indices.
+                The format is that of 'dispersion.byband.csv', i.e., energies
+                of each of the bands in columns. This option may be used to
+                manually 'correct' an incorrect band connection result.
+                If the data is not sorted (as function of k or B), then try to
+                sort the data automatically before applying the band alignment
+                algorithm. (Alias: reconnect)
+vs              For kdotpy compare: Separate files into sets. Basically, apply
+                the functionality of kdotpy merge to the comparison script.
+                Example:
+                $ kdotpy compare [options] f1 f2 f3 vs f4 f5
+                This compares the combination of files f1, f2 and f3 to the
+                combination of files f4 and f5. There can be more than two sets
+                by using vs more than once, and each set can have an arbitrary
+                number of files.
+                If not given, compare the specified files one-by-one.
+writecsv        For kdotpy merge: Write csv output (dispersion or B dependence)
+                based on merged data.
+
+Note:
+The options for kdotpy 1d and kdotpy 2d that affect plotting, such as 'erange',
+also apply to the plots made by kdotpy merge.
+
+
+kdotpy batch
+============
+
+kdotpy batch is an auxiliary subprogram for running any of the other scripts in
+batch mode, i.e., many runs in sequence, with a variation in the parameters. The
+philosophy is more or less like a shell (bash) script, but with a bit more
+'decoration' and with proper handling of runs in parallel.
+
+The general syntax is best illustrated with an example:
+$ kdotpy batch [opts] @x 5.0 9.0 / 4 @y [CdTe, HgTe, HgCdTe 68%]
+    do kdotpy 2d [opts] llayer 10 @x 10 mater HgCdTe 68% @y HgCdTe 68%
+    out -@0of@@
+
+What happens is the following: The command following the argument 'do'
+('kdotpy 2d' in this case) is executed iteratively with '@x' replaced by 5.0,
+6.0, 7.0, 8.0, and 9.0, and '@y' by the values 'CdTe', 'HgTe', and 'HgCdTe 68%'.
+In this example, 'kdotpy 2d' will be executed 15 times. The '@0' and '@@' are
+replaced by counting and total value respectively. Thus, the output files will
+be numbered as '1of15', '2of15', etc.
+
+The variables 'x' and 'y' have been specified before the 'do'. In this case, 'x'
+is defined with a range specification (same syntax as the 'Specification of
+momenta', see above) and 'y' with a list specification, within brackets [] and
+separated by commas.
+
+Overview of the replacements:
+  @@              Total number of iterations
+  @0              Iteration counter, starting at 1
+  @1, @2, ...     Iteration counter for each @ variable, in order of their input
+                  before the 'do'.
+  @variablename   Value of variable with name 'variablename'
+  @{variablename} Value of variable with name 'variablename'
+NOTE: Variable names may not be a one-digit number or @. The variable name may
+also not contain curly brackets, { or }. Other than these restrictions,
+everything is allowed. In the command after 'do', please make sure that variable
+names do not 'clash': For example, if both @x and @xy are used, in any @xy after
+'do', the @x may be replaced first, leading to unexpected results. This may be
+prevented by using the @{variablename} pattern consistently.
+
+Options (to be specified before 'do'):
+dryrun           Display the scripts that would be executed, following the
+                 replacements of the @ variables. However, do not actually run
+                 them.
+cpu #            Specify number of threads/processes that should run in
+                 parallel. This option takes into account the 'cpu' option in
+                 the invocation of kdotpy that is run (i.e., specified after
+                 'do') in order to determine the number of jobs that can run in
+                 parallel. For example,
+                 'kdotpy batch cpu 6 ... do kdotpy 2d ... cpu 2'
+                 will cause three (= 6 / 2) instances of kdotpy 2d to be run in
+                 parallel.
+proc #           Number of parallel processes to be run. This option does not
+                 take into account what is specified after 'do'. For example:
+                 'kdotpy batch proc 6 ... do kdotpy 2d ... cpu 2'
+                 will cause six instances of kdotpy 2d to be run in parallel,
+                 and each instance might run two processes/threads in parallel.
+
+NOTE: If both 'cpu' and 'proc' are not given, default to the maximum number of 
+available CPUs. If also the command does not specify the number of CPUs, then
+run the command sequentially with each running at the maximum number of CPUs.
+
+NOTE: The console output (stdout and stderr streams) of the individual kdotpy
+commands is 'dumped' into the files stdout.1.txt, stdout.2.txt, ... and
+stderr.1.txt, stderr.2.txt, ... . The kdotpy batch subprogram writes its own
+information to the standard stdout and stderr streams.
+
+
+Configuration option reference
+==============================
+
+The configuration file (typically $HOME/.kdotpy/kdotpyrc) hosts a number of
+configuration options that are meant to be 'semi-static', e.g., configurable
+options that need to be changed occasionally or even only once, as opposed to
+parameters that are specified with every run.
+
+NOTE: If the configuration file is unavailable, but there is an old
+configuration file at '$HOME/.hgmnte/hgmnterc', the latter file will be copied
+from the old location to the new one. This should happen automatically the first
+time any kdotpy script is run after the renaming. You should remove your old
+configuration directory '$HOME/.hgmnte' manually in order to get rid of the
+warnings.
+
+If desired, custom configuration can be loaded from the command line in two
+ways:
+config file.cfg     Where 'file.cfg' is a file name. This loads custom
+                    configuration values from the given file. If the file with
+                    the given name is not found in the working directory, then
+                    also search in the configuration directory (usually
+                    ~/.kdotpy).
+config "fig_hsize=150; fig_vsize=100"
+                    Take the given keys and values directly from the command
+                    line. The "key=value" pairs must be separated by semicolons
+                    (;). In almost all cases, it is necessary to enclose the
+                    list of key-value pairs in double quotes "like this" or
+                    single quotes 'like this'.
+It is possible to combine multiple instances, for example:
+  config file1.cfg config "fig_hsize=150; fig_vsize=100" config file2.cfg
+The files/inputs are processed from left to right: newer values override
+existing values.
+
+The configuration takes following precedence:
+1. Custom value (from custom configuration file or command line) if set. If not
+   set, then
+2. Values from the loaded XML files (kdotpy merge and kdotpy compare only);
+   latest loaded file takes highest precedence. If not applicable or not set,
+   then
+3. Initial value (from $HOME/.kdotpy/kdotpyrc) if set. If not set, then
+4. Default value (hardcoded into the program).
+
+Currently, the following options have a meaning:
+diag_solver        The implementation of the function that does the matrix
+                   diagonalization. Possible values:
+                     feast       Use FEAST algorithm (Intel MKL). If this
+                                 package is not available, fall back to eigsh.
+                                 Can be tried as an alternative if eigsh fails,
+                                 e.g., for very large matrices (dim > 5e6).
+                     eigsh       Use eigsh from SciPy sparse matrix library.
+                     superlu_eigsh
+                                 Same as eigsh, but SuperLU is requested
+                                 explicitly. Enables detailed timing statistics,
+                                 as with other custom eigsh solvers.
+                     umfpack_eigsh
+                                 Like eigsh, but uses umfpack instead of SuperLU
+                                 for matrix inversion. Recommended for large
+                                 matrices. Falls back to SuperLU if Scikit
+                                 UMFPACK is not available. REQUIRES available
+                                 scikit-umfpack and a suitable SciPy version.
+                     pardiso_eigsh
+                                 Like umfpack_eigsh, but uses Intel MKL PARDISO
+                                 instead. REQUIRES pyMKL package.
+                     cupy_eigsh  Alternative implementation of the eigsh
+                                 solver in python. Uses CUDA libraries for
+                                 Lanczos iteration (on GPU) and PARDISO to
+                                 SuperLU for matrix inversion, depending on
+                                 availability. REQUIRES CUDA libraries and
+                                 CuPy package.
+                     jax_eigh    Uses the JAX eigh solver. First converts
+                                 sparse matrices to dense. Extremely memory
+                                 inefficient! Fails if not enough VRAM
+                                 can be allocated. Use the 'gpus' option
+                                 to reduce the number of workers on the GPU
+                                 if this happens. This solver is best suited
+                                 for a large number of 'neig'. REQUIRES jax
+                                 package.
+                     auto        Decision based on script type. Uses
+                                 'pardiso_eigsh' for kdotpy 1d if available,
+                                 otherwise uses 'eigsh' for all scripts.
+                                 Suggests alternative solvers if they could
+                                 be faster. (default; alias: automatic)
+diag_solver_worker_type
+                   Sets the parallelization strategy for solver workers.
+                   Options:
+                     process    Use a process pool for solver workers.
+                                Recommended strategy for most solvers.
+                     thread     Use a thread pool in the main process for solver
+                                workers. Recommended for CUDA based solver
+                                'cupy_eigsh' for optimal GPU workload.
+                     none       No parallel execution of the solve step. Every
+                                solve task is executed serially in the main
+                                thread. Recommended for debugging.
+                     auto       Decision based on 'diag_solver'.
+                                (default; alias: automatic)
+diag_solver_cupy_dtype
+                   Sets the data type for the cupy solver. Options:
+                     single     Uses complex numbers with single float
+                                precision. This leads to a large speed boost on
+                                GPUs with TensorCores. Precision of eigenvalues
+                                is worse (on the order of 10 µeV).
+                     double     Uses complex numbers with double float
+                                precision. Solution speed and precision of
+                                eigenvalues comparable to other solvers.
+                                Medium speed boost expected on GPUs with modern
+                                FP64-TensorCores (e.g. Nvidia A100). (default)
+diag_solver_cupy_iterations
+                   Maximum number of Lanczos iteration steps for both precision
+                   options. If number of iterations is exceeded, fall back to
+                   better precision first or CPU based solver next. (default: 5)
+diag_solver_cupy_gemm_dim_thr
+                   Maximum dimension for matrix matrix multiplication in single
+                   precision mode. If problem size exceeds this value, the
+                   solution is split into multiple smaller problem sets. Smaller
+                   values can lead to worse solution speeds, larger values can
+                   lead to more numerical problems and fallback to slower double
+                   precision solver. The default value is 4e6, from initial
+                   tests.
+diag_save_binary_ddp
+                   Whether and how to save intermediate binary files for each
+                   DiagDataPoint (diagonalization data point). Possible choices:
+                     npz     The NumPy (compressed) binary file format; see
+                             https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
+                             (alias: numpy)
+                     h5      HDF5 data format. This requires the Python module
+                             h5py to be installed. See https://docs.h5py.org
+                             (alias: hdf5)
+                     false   Do not save intermediate files (default)
+                   Note: This configuration value is independent from the
+                   command line option 'tempout'. The npz and hdf5 formats are
+                   meant for permanent data storage, the 'tempout' files are
+                   only safe for immediate re-use and should not be used for
+                   long-term storage.
+err_unexpected_ignore
+                   In case an unexpected error is caught in an optional step of
+                   a script, the code may continue with further steps instead of
+                   terminating early. (default: true)
+err_unexpected_print_traceback
+                   In case an unexpected error was suppressed with
+                   'err_unexpected_ignore', print a full error traceback to
+                   stderr, as if the error was not caught (but continue
+                   running). (default: true)
+task_retries       Number of times a task is restarted after any exception has
+                   been raised. (default: 2)
+tasks_grouped      If set to true, all steps for a single DiagDataPoint are
+                   executed within the same worker/thread with the settings for
+                   the solve_ham step. Compared to the default mode (false),
+                   this involves less inter-worker data transfers (via
+                   "pickling") that could give rise to issues with very large
+                   eigenvectors. As such, the worker communication behaves
+                   similar to kdotpy versions < v0.72. (default: false)
+numpy_printprecision
+                   The number of digits of precision for NumPy array floating
+                   point output. (This output is used in verbose mode mostly.)
+                   The value is passed to numpy.set_printoptions(), see
+                   https://numpy.org/doc/stable/reference/generated/numpy.set_printoptions.html.
+                   The value has to be an integer >= 0. The number of digits
+                   shown does not exceed the number needed to uniquely define
+                   the values, e.g., 17 digits for 64-bit floating point
+                   numbers. (default: 6)
+numpy_linewidth    Sets the (approximate) line width for NumPy array output.
+                   (This output is used in verbose mode mostly.) The value is
+                   passed to numpy.set_printoptions(), see
+                   https://numpy.org/doc/stable/reference/generated/numpy.set_printoptions.html.
+                   The value has to be an integer >= 0. The output is always at
+                   least one column, so small values may be exceeded.
+                   (default: 200)
+job_monitor_limit  If the number of data points is smaller than this value, show
+                   the full job monitor with information about the intermediate
+                   steps. Otherwise, show the simple in-line progress indicator.
+                   For the value 0, always show the simple progress indicator.
+                   (default: 101)
+band_align_exp     Value of the exponent in the minimization function of the
+                   'error' in the band alignment algorithm. A numerical value
+                   equal to e means that sum(|DeltaE|^e) is minimized.
+                   Alternatively, if the special value 'max' is used, then the
+                   minimization function is max(|DeltaE|). (default: 4)
+band_align_ndelta_weight
+                   Coefficient of the penalty for reduction of the number of
+                   bands in the band alignment algorithm. The higher this value,
+                   the more the algorithm 'resists' changes in the number of
+                   bands. The value may not be negative (however, 0 is allowed),
+                   and too high values should be avoided. It is recommended to
+                   use the default value unless the band alignment algorithm
+                   does not proceed correctly. (default: 20.0)
+band_char_node_threshold
+                   In the band character algorithm, this value times the
+                   resolution (zres) is the minimum value the wave function
+                   should reach such that a node (zero) is counted. (default:
+                   1e-6)
+band_char_orbital_threshold
+                   In the band character algorithm, the maximum value for the
+                   probability density (|psi|^2) in an orbital for the
+                   probability density to be considered zero. In that case, the
+                   orbital content of that orbital is ignored. (default: 5e-3)
+band_char_use_minmax
+                   In the band character algorithm, whether to use the 'new'
+                   node counting method, that counts flips between local
+                   extrema. If set to false, use the legacy method. (boolean
+                   value; default: true)
+band_char_make_real
+                   In the band character algorithm, whether to divide the
+                   orbital component by the complex phase factor at its maximum,
+                   so that the function becomes effectively real, prior to
+                   counting the nodes. If set to false, consider both real and
+                   imaginary part as is. (boolean value; default: false)
+bandindices_adiabatic_debug
+                   Whether to write the intermediate result for adiabatic band
+                   index initialization to a csv file. This is useful for
+                   debugging this algorithm, for example if the charge
+                   neutrality point ends up at an incorrect position. (boolean
+                   value; default: false)
+batch_float_format Format string for representation of float values being
+                   replaced in the command string. This is a standard %-style
+                   conversion, with the following addition: If a . (period) is
+                   added to the end, for example '%f.', apply the smart decimal
+                   option, i.e., strip superfluous zeros at the end, but keep
+                   the decimal point if the value is integer. Useful examples
+                   are, among others: %s, %f, %f., %.3f, %g. (default: %s)
+batch_stderr_extension
+                   Extension for the file that kdotpy batch writes stderr to.
+                   (default: txt)
+batch_stdout_extension
+                   Extension for the file that kdotpy batch writes stdout to.
+                   (default: txt)
+berry_dk           Momentum step size (in nm^-1) for calculating the derivative
+                   of the Hamiltonian in the calculation of the Berry curvature
+                   as function of momentum. It does not apply to the Berry
+                   curvature calculation in Landau-level mode. The value must be
+                   positive. (default: 1e-3)
+berry_ll_simulate  Whether to use simulated Berry curvature (more accurately:
+                   Chern numbers) for Berry / Hall output, for kdotpy ll,
+                   instead of the calculated one. The calculated value may
+                   sometimes show artifacts that cannot be easily resolved by
+                   increasing number of eigenstates for example. The simulated
+                   Berry curvature (observable 'berrysim') is set to exactly 1
+                   for all states at nonzero magnetic field. (boolean value;
+                   default: false)
+                   Hint: One may do a comparison by doing the calculation twice
+                   with settings 'true' and 'false', respectively. The output is
+                   written to different file names as to ease the comparison.
+bhz_allow_intermediate_bands
+                   Whether to allow a non-contiguous set of A bands. By default
+                   (false), do not allow B bands in between the A bands. If set
+                   to true, relax this restriction. This only takes effect if
+                   the input are band labels, e.g., "bhz E1 H1 L1". It does not
+                   apply to numeric input (e.g., "bhz 2 2"), which is a
+                   contiguous set by definition. NOTE: Setting True is
+                   experimental, it may cause unexpected errors. (boolean value;
+                   default: false)
+bhz_points         Number of horizontal data points for the BHZ dispersion plot.
+                   (default: 200)
+bhz_gfactor        Whether to output dimensionless g factors in the BHZ output
+                   (tex file). If set to false (default), output dimensionful
+                   quantities 'G' in meV / T. (boolean value; default: false)
+bhz_abcdm          Whether to output (tex file) a four-band BHZ model in
+                   'standard form', using coefficients A, B, C, D, M. If this
+                   cannot be done, use the generic form instead. (boolean value;
+                   default: false)
+bhz_ktilde         If BHZ is done at a nonzero momentum value k0, whether to
+                   express the Hamiltonian in the TeX output as shifted momentum
+                   tilde{k} = k - k0. If set to false, express it in terms of
+                   unshifted momentum k. This option has no effect for BHZ done
+                   at k0 = 0. (boolean value; default: true)
+bhz_plotcolor      Colour of the BHZ dispersion in the BHZ output file. It may
+                   be a single matplotlib colour, a pair separated by a comma
+                   (separate colours for each block), or a triplet separated by
+                   commas (one block, other block, states without specific
+                   block). (default: red,blue,black, legacy value: red)
+bhz_plotstyle      Style of the BHZ dispersion in the BHZ output file. It may
+                   be a single matplotlib line style, a pair separated by a
+                   comma (separate styles for each block), or a triplet
+                   separated by commas (one block, other block, states without
+                   specific block). Examples are 'solid', 'dashed', 'dotted'.
+                   (default: dotted, legacy value: dashed,dotted,solid)
+cnp_legacy_method  Whether to use the legacy algorithm for finding the charge
+                   neutrality point, that was the default for versions < 0.89.
+                   If false (default for versions ≥ 0.89), use the updated
+                   algorithm. (boolean value; default: false)
+color_bindex       Colormap for band index. For band index only, the range of
+                   the observable is adjusted to the number of colours in the
+                   colormap. (default: tab21posneg; note: for the 'old' set of
+                   colours, use 'tab20alt')
+color_dos          Colormap for density of states (default: Blues)
+color_energy       Colormap for energy plot (2D dispersion) (default: jet)
+color_idos         Colormap for integrated density of states (default: RdBu_r)
+color_indexed      Colormap for indexed (discrete) observables
+                   (default: tab20alt,tab20)
+color_indexedpm    Colormap for indexed (discrete) observables using a 'dual'
+                   colour scale, such as 'llindex.sz' (default: tab20)
+color_ipr          Colormap for IPR observables (default: inferno_r)
+color_localdos     Colormap for local density of states (default: cividis,jet)
+color_posobs       Colormap for observables with positive values
+                   (default: grayred)
+color_shadedpm     Colormap for continuous positive observables using a 'dual'
+                   colour scale, such as 'y2.isopz' (default: bluereddual)
+color_sigma        Colormap for 'sigma observables' (standard deviation)
+                   (default: inferno_r)
+color_symmobs      Colormap for observables with a symmetric (positive and
+                   negative) range of values (default: bluered)
+color_threehalves  Colormap for observables with range [-3/2, 3/2]
+                   (default: yrbc)
+color_trans        Colormap for transition plots (default: hot_r)
+color_wf_zy        Colormap for wavefunction plot |psi(z, y)|^2 (default: Blues)
+csv_style          Formatting for csv output. Possible values:
+                     csvpandas    Comma separated values using pandas module
+                     csvinternal  Comma separated values using internal function
+                     csv          Choose 'csvpandas' if pandas is available,
+                                  otherwise choose 'csvinternal' (default)
+                     align        Align values in columns in the text file
+csv_multi_index    Determines how a multi-index (LL index, band index) is
+                   formatted in csv output. Possible values:
+                     tuple        As a tuple (##, ##)  (default)
+                     llindex      Only the LL index
+                     bindex       Only the band index
+                     split        LL index on first row, band index on second
+                                  row (alias: tworow)
+                     short        Short version of tuple ##,## (space and
+                                  parentheses are omitted)
+csv_bandlabel_position
+                   Location of the band labels in the 'by-band' CSV output.
+                   Possible values:
+                     top      At the very top, above the other column headings.
+                              (default; alias: above)
+                     second   Between the data and the other column headings.
+                              (alias: between)
+                     bottom   At the bottom, below the data. (alias: below)
+dos_interpolation_points
+                   The minimal number of points on the horizontal axis for some
+                   DOS and Berry curvature (Hall conductivity) plots. When the
+                   calculated number of (k or b) points is smaller, then perform
+                   interpolation to at least this number. Must be an integer; if
+                   equal to 0, do not interpolate. (default: 100)
+dos_energy_points  The minimal number of points on the energy axis for some DOS
+                   and Berry curvature (Hall conductivity) plots. An energy
+                   resolution will be chosen so that the energy interval spans
+                   at least this many values. (default: 1000)
+dos_convolution_points
+                   The minimal number of points in the energy variable taken
+                   when applying a broadening function (a convolution operation)
+                   to an integrated DOS. If the energy range contains fewer
+                   points than this value, the integrated DOS is interpolated.
+                   Note that this value affects the 'dummy variable' of the
+                   convolution integral only, i.e., the internal accuracy of the
+                   (numerical) integration. The broadened integrated DOS (the
+                   result) will always be with respect to the same energies as
+                   the input. (default: 200)
+dos_print_validity_range
+                   Print the lower and upper bound of the validity range for DOS
+                   and IDOS. If the lower bound (first value) is larger than the
+                   upper bound (second value), then the DOS and IDOS are invalid
+                   for all energies. See also 'plot_dos_validity_range'.
+                   (boolean value; default: true)
+dos_print_momentum_multiplier
+                   The momentum range can be extended by using a multiplier that
+                   takes into account the part of momentum space not explicitly
+                   calculated. Examples: If only positive momenta are
+                   calculated, simulate the negative values by multiplying by 2;
+                   or, if in polar coordinates the calculation was done from 0
+                   to 90 degrees angle, multiply by 4 for a full circle. This
+                   setting determines whether this multiplicative factor should
+                   be printed to the standard output. (boolean value; default:
+                   false)
+dos_quantity       The quantity in which to express density of states. Prior to
+                   version v0.95, this was done using the command line arguments
+                   densitypnm, densityecm, etc.
+                   Possible values:
+                     k    Occupied volume in momentum space; units 1/nm^d
+                          (alias: momentum)
+                     p    Density of particles/carriers (n or dn/dE); units
+                          1/nm^d (default; alias: n, particles, carriers,
+                          cardens)
+                     s    Density of states (IDOS or DOS); units 1/nm^d. The
+                          only difference with 'p' is the way the quantities are
+                          labelled. (alias: dos, states)
+                     e    Density of charge (sigma or dsigma/dE); units e/nm^d
+                          (alias: charge)
+                   (The exponent d in the unit is adjusted according to the
+                   dimensionality.)
+dos_unit           The length units used for density of states. Prior to version
+                   v0.95, this was done using the command line arguments
+                   densitypnm, densityecm, etc.
+                   Possible values:
+                     nm   Units of 1/nm^d, e/nm^d (default)
+                     cm   Units of 1/cm^d, e/cm^d
+                     m    Units of 1/m^d, e/m^d
+                   In the output, the density values are also scaled to a
+                   suitable 'power of ten'. The exponent d in the unit is
+                   adjusted according to the dimensionality.
+dos_strategy_no_e0 The strategy to follow when trying to extract DOS or IDOS
+                   from the band structure, when the zero energy E0 is not well
+                   defined. Possible values:
+                     strict   Neither DOS nor IDOS can be extracted.
+                     dos      DOS can be extracted, but IDOS cannot (default).
+                     ignore   Both DOS and IDOS can be extracted, ignoring the
+                              fact that E0 may lie at an arbitrary energy value.
+                   When E0 is defined (either manually or automatically), the
+                   extraction of DOS and IDOS is always possible, regardless of
+                   this setting.
+fig_matplotlib_style
+                   Matplotlib style file for changing the properties of plot
+                   elements. This may be a file in the configuration directory
+                   (~/.kdotpy) or in the working directory, or a built-in
+                   matplotlib style. (default: kdotpy.mplstyle)
+fig_hsize, fig_vsize
+                   Horizontal and vertical size of the figures, in mm. (default:
+                   150, 100, respectively)
+fig_lmargin, fig_rmargin, fig_bmargin, fig_tmargin
+                   Figure margins (left, right, bottom, top), i.e., the space in
+                   mm between the figure edge and the plot area. (default: 20,
+                   4, 12, 3, respectively)
+fig_charlabel_space
+                   Vertical space for the character labels in the dispersion
+                   plot, in units of the font size. To avoid overlapping labels,
+                   use a value of approximately 0.8 or larger. (default: 0.8)
+fig_colorbar_space Space reserved for the colour bar legend in mm (default: 30)
+                   In other words, this is the distance between the right-hand
+                   edges of the figure and the plot if a colour bar is present.
+                   It 'replaces' the right margin.
+fig_colorbar_margin
+                   Space between the right-hand edge of the plot and the colour
+                   bar legend, in mm. This space is taken from the colour bar
+                   space (set by fig_colorbar_space), so it does not affect the
+                   right-hand edge of the plot. (default: 7.5)
+fig_colorbar_size  Width of the actual colour bar in mm (default: 4)
+fig_colorbar_method
+                   Way to place the colour bar; one of the following options:
+                     insert   Take space inside the existing plot; keep the
+                              figure size, but decrease the plot size. (default)
+                     extend   Add extra space; keep the plot size but increase
+                              the figure size.
+                     file     Save into a separate file. The original figure is
+                              not changed.
+fig_colorbar_labelpos
+                   Method to determine the position of the label of the colour
+                   bar. One of the following options:
+                     legacy   The 'old' method, using colorbar.set_label plus a
+                              manual shift.
+                     xaxis    As label for the 'x axis', directly below
+                              the colour bar.
+                     yaxis    As label for the 'y axis', vertically up on the
+                              right-hand side.
+                     center   Centered in the whole space allocated for the
+                              colour bar, including margins; very similar to
+                              'legacy' for default settings of the colour bar
+                              size and margins. (default)
+                     left     Left aligned with the left border of the colour
+                              bar.
+fig_colorbar_abstwosided
+                   Whether a shaded dual colour bar, where the vertical value is
+                   the absolute value of the observable, should show the
+                   observable itself, with values running from -max to max (if
+                   set to true; default). Otherwise show the absolute value,
+                   running from 0 to max. (boolean value; default: true)
+fig_extend_xaxis   Relative extension of the horizontal plot range for
+                   dispersion and magnetic field dependence. For example, a
+                   value of '0.05' means 5% of the range is added left and right
+                   of the minimum and maximum x value (where x is k or B),
+                   respectively. This does not affect the range if the command
+                   line argument 'xrange' is used. Use '0' to not extend the
+                   plot range. The value may not be negative. (default: 0.05)
+fig_inset_size     Size (width and height) of the inset legend in mm. Values
+                   smaller than 30 are not recommended. (default: 30)
+fig_inset_margin   Space between inset edge and plot edge in mm. (default: 3)
+fig_inset_color_resolution
+                   Number of colour gradations along each axis for the RGB
+                   (inset) legend. Do not change unless file size is an issue.
+                   (default: 20)
+fig_legend_fontsize
+                   Specify the font size of the legend. May also be set to
+                   'auto' for automatic; this yields a font size of 8 for RGB
+                   (inset) legend, 10 for other legend or colorbars (may be
+                   subject to settings in matplotlibrc and/or style files).
+                   (default: auto)
+fig_spin_arrow_length
+                   Arrow length in spin plots. The value is the length in mm for
+                   arrows representing spin value 0.5 or a direction.
+                   (default: 5)
+fig_max_arrows     Maximum number of arrows in a vector plot in each dimension.
+                   The value 0 means no limit. (default: 20)
+fig_arrow_color_2d Color of the arrows in a 2D vector plot. This must be a
+                   valid matplotlib colour. (default: #c0c0c0)
+fig_ticks_major    Strategy to determine the major ticks in the plots. Possible
+                   choices:
+                     none    No major ticks
+                     auto    Determine number of ticks automatically (based on
+                             plot size). (default)
+                     fewer   A few ticks per axis (typically 3)
+                     normal  A moderate amount of ticks per axis (typically 6)
+                     more    Many ticks per axis (typically 12)
+                   One can use different choices for the horizontal and vertical
+                   axis, as follows: 'fig_ticks_major=normal,fewer'
+fig_ticks_minor    Strategy to determine the minor ticks in the plots. Possible
+                   choices:
+                     none    No minor ticks (default)
+                     auto    Determine automatically (matplotlib's algorithm)
+                     fewer   Few minor ticks (major interval divided by 2)
+                     normal  Moderately many ticks (major interval divided by 4
+                             or 5).
+                     more    Many minor ticks (major interval divided by 10)
+                   One can use different choices for the horizontal and vertical
+                   axis, as follows: 'fig_ticks_minor=fewer,none'
+fig_unit_format    Opening and closing bracket of the units in axis and legend
+                   labels. (default: [])
+lattice_regularization
+                   Enables or disables lattice regularization. The settings
+                   'true' and 'false' correspond to the obsolete command-line
+                   arguments 'latticereg' and 'nolatticereg', respectively. The
+                   recommended value and default value is 'false'. Note that
+                   for older kdotpy versions (kdotpy v0.xx), the default value
+                   was 'true' for compatibility reasons.
+                   (boolean value; default: false)
+lattice_zres_strict
+                   Enables or disable strict check of commensurability of z
+                   resolution with thickness of the layers, i.e., whether the
+                   thicknesses are integer multiples of the z resolution. If
+                   they are incommensurate, quit with an error if strict
+                   checking is enabled. If disabled, change the thicknesses to
+                   match the z resolution and raise a warning. (boolean value;
+                   default: true)
+magn_epsilon       Numeric value that determines whether small values near zero
+                   need to be inserted if the grid contains magnetic fields. The
+                   value zero means disabling this feature. Otherwise, +/- the
+                   absolute value of magn_epsilon is inserted at either side of
+                   B = 0, whichever side (positive or negative) is included in
+                   the range. If negative, insert the values only if the range
+                   is two-sided. The motivation for including this option is to
+                   reduce some plot artifacts for ranges that contain positive
+                   and negative magnetic fields. For this option to be
+                   effective, it might also be necessary to set the 'split'
+                   parameter to a small value. (default: -1e-4)
+selfcon_acceptable_status
+                   Maximum status level for the result of the self-consistent
+                   Hartree calculation to be considered valid. Possible values:
+                     0   Successful
+                     1   Calculation skipped or aborted (default)
+                     2   Did not converge, but convergence is likely after more
+                         iterations
+                     3   Did not converge, convergence cannot be estimated or is
+                         unlikely
+                     4   Failed
+selfcon_check_chaos_steps
+                   Number of previous iterations used for the detection of
+                   chaotic behaviour. If this value is set to n, we say chaos
+                   occurs at iteration i if the previous V_j closest to V_i are
+                   more than n iterations ago, i.e., i - j > n. When chaos is
+                   detected, adjust the time step if selfcon_dynamic_time_step
+                   is set to true. (default: 4)
+selfcon_check_orbit_steps
+                   Number of previous iterations used for the detection of
+                   periodic orbits. We say a periodic orbit occurs at iteration
+                   i if the previous V_j closest to V_i show a regular pattern
+                   like i - j = 2, 4, 6, 8; the value n set here is the minimum
+                   length of the regular pattern. When a periodic orbit is
+                   detected, adjust the time step if selfcon_dynamic_time_step
+                   is set to true. (default: 4)
+selfcon_convergent_steps
+                   Number of consecutive convergent steps (iteration steps where
+                   the convergence condition is met) required for the self-
+                   consistent Hartree calculation to be considered successful.
+                   This prevents accidental convergence which could lead to a
+                   spurious solution. (default: 5)
+selfcon_debug      Whether to enable debug mode for the self-consistent Hartree
+                   calculation. In debug mode, write temporary files and provide
+                   traceback for all exceptions (also KeyboardInterrupt) within
+                   the iteration loop, which is useful for debugging. If debug
+                   mode is disabled, then do not write temporary files and
+                   continue on SelfConError and KeyboardInterrupt exceptions.
+                   (boolean value; default: false)
+selfcon_diff_norm  Method that defines a measure of convergence for the self-
+                   consistent calculation. This method is essentially a function
+                   applied to the difference of potentials of the last two
+                   iteration steps. The result, a nonnegative value, is compared
+                   to the convergence criterion. Possible values:
+                     max   The maximum of the difference. Also known as supremum
+                           norm or L-infinity norm.
+                     rms   The root-mean-square of the difference. This is the
+                           L2 norm. (default)
+selfcon_dynamic_time_step
+                   Whether the "time" step for the self-consistent calculation
+                   is adapted automatically between iterations. If set to false,
+                   the time step stays the same between iterations (boolean
+                   value; default: false).
+selfcon_erange_from_eivals
+                   Whether to use the eigenvalues from first diagonalization
+                   result to determine the energy range used for calculating the
+                   density of states for the self-consistent calculation. If
+                   false, the energy range given in the command line is used
+                   instead. (boolean value; default: false).
+selfcon_full_diag  Whether to use the full-diagonalization approach for the
+                   self-consistent Hartree calculation. If true, use the full-
+                   diagonalization approach that calculates all conduction band
+                   states to determine density as function of z. If false, use
+                   the standard mode that calculates bands around the CNP. The
+                   latter is significantly faster, but the results are based on
+                   an implausible assumption on the density at the CNP. (boolean
+                   value; default: true)
+selfcon_ll_use_broadening
+                   Whether to enable broadening during self-consistent
+                   calculation in LL mode.
+                   This can lead to bad convergence behaviour (or no convergence
+                   at all, depending on selected broadening), but results in
+                   more accurate Hartree potentials for the given broadening.
+                   This does not affect the broadening applied to the main
+                   diagonalization/postprocessing after the self-consistent
+                   calculation has finished. (boolean value; default: false)
+selfcon_energy_points
+                   The minimal number of points on the energy axis for the self-
+                   consistent calculation. An energy resolution will be chosen
+                   so that the energy interval spans at least this many values.
+                   This number may be fairly high without performance penalty.
+                   (default: 1000)
+selfcon_min_time_step
+                   The minimal value for the "time" step for the self-consistent
+                   calculation. If selfcon_dynamic_time_step is set to true,
+                   the time step can never get lower than this value. Allowed
+                   values are between 0 and 1. (default: 0.001)
+selfcon_potential_average_zero
+                   Shift the potential such that its average will be zero at
+                   each iteration of the self-consistent calculation. Enabling
+                   this option is recommended for reasons of stability and for
+                   consistency of the output. (boolean value; default: true)
+selfcon_symmetrization_constraint
+                   Constraint on how the symmetry is checked and symmetrization
+                   performed on multiple quantities when solving the Poisson
+                   equation. When symmetry norm is below threshold the quantity
+                   is always fully symmetrized over whole layer stack (except
+                   for 'never'). Possible values:
+                     never   Symmetry will not be checked. No symmetrization is
+                             performed.
+                     strict  Symmetry is checked over whole layer stack.
+                             (default)
+                     loose   Symmetry is checked over the well region only. This
+                             method is preferred for asymmetric layer stacks.
+selfcon_use_init_density
+                   Whether a uniform density profile (consistent with the total
+                   carrier density) is applied in the initialization of the
+                   self-consistent Hartree calculation. If enabled, calculate
+                   the potential and apply it to the Hamiltonian in the first
+                   iteration. If disabled, use the Hamiltonian with zero
+                   potential, unless an initial potential is loaded from a file.
+                   (boolean value; default: false)
+transitions_min_amplitude
+                   Minimum amplitude to consider for transitions. The lower this
+                   number, the larger the number of data points and the larger
+                   the data files and plots. (default: 0.01)
+transitions_min_deltae
+                   Minimum energy difference in meV to consider for transitions.
+                   This value is proportional to a minimal frequency. The
+                   smaller this number, the larger the number of data points and
+                   the larger the data files and plots. (default: 0.1)
+transitions_max_deltae
+                   Maximum energy difference in meV of transitions, i.e., upper
+                   limit of the vertical axis (filtered transitions plot only).
+                   If set to 0, determine the vertical scale automatically.
+                   (default: 0)
+transitions_dispersion_num
+                   Number of transitions to include in the dispersion or B
+                   dependence (LL fan) plot. If set to n, the transitions with
+                   the n highest transition rates will be shown. If set to 0,
+                   an unlimited number of transitions. (default: 4)
+transitions_broadening_type
+                   Shape of the broadening function used for broadening the
+                   transitions in the absorption plot. Possible choices:
+                     step        A step function (alias: delta)
+                     lorentzian  Lorentzian function (Cauchy distribution),
+                                 scale parameter gamma, which is the half-width
+                                 at half-maximum. (default; alias: lorentz)
+                     gaussian    Gaussian function, scale parameter sigma, which
+                                 is the standard deviation. (alias: gauss,
+                                 normal)
+                     fermi       Fermi function (thermal distribution), scale
+                                 parameter is energy. (alias: logistic, sech)
+                     thermal     Fermi function (thermal distribution), scale
+                                 parameter is temperature.
+                   NOTE: Change in versions >= 0.79. The types 'fermi' and
+                   'thermal' are no longer aliases. They have the same shape,
+                   but the scale ('width') parameter is treated differently, as
+                   energy and temperature, respectively.
+                   The broadening functions for the absorption are the
+                   probability density functions for all of these choices.
+transitions_broadening_scale
+                   Scale parameter of the broadening function. This may be an
+                   energy (in meV) or a temperature (in K) that determines the
+                   amount of broadening (i.e., its 'width'). (default: 2.5)
+transitions_all_if_filtered
+                   Output plot and table of all transitions if a carrier density
+                   is set. If false, suppress output of all transitions if
+                   filtered transitions are being requested. (boolean value;
+                   default: false)
+transitions_spectra
+                   (experimental)
+                   Output spectral plots and tables if a carrier density is set.
+                   If false, skip (time consuming) spectra calculation.
+                   (boolean value, default: false)
+transitions_plot   Output transition plot. If false, do not create and save
+                   a transitions plot. (boolean value, default: true)
+plot_constdens_color
+                   The colour of the curves in the 'constdens' plots. The value
+                   must be a valid matplotlib colour. (default: blue)
+plot_dispersion_default_color
+                   The uniform colour of the dispersion curves, if there is no
+                   colour scale set for the given observable (or if no
+                   observable is set). The value must be a valid matplotlib
+                   colour. (default: blue)
+plot_dispersion_energies
+                   Plot special energies, e.g., charge-neutrality point, Fermi
+                   energy/chemical potential at zero and finite density in
+                   dispersion plots. (boolean value; default: true)
+plot_dispersion_energies_color
+                   The line colour for special energies. The value must be a
+                   valid matplotlib colour. If left empty, take lines.color from
+                   matplotlibrc or a style file. (default: black)
+plot_dispersion_parameter_text
+                   Write an indication in the plot for constant parameter
+                   values, e.g., when plotting along kx for a nonzero ky value,
+                   write "For k_y = <value>". (boolean value; default: true)
+plot_dispersion_stack_by_index
+                   If enabled, make sure the data with the lowest band or
+                   Landau-level index is shown on top, to make sure the 'most
+                   interesting data' (low-index states) is not obscured by 'less
+                   interesting data' (high-index states). Otherwise, the plot
+                   function uses the default plot stacking order: the data is
+                   then drawn simply in the order by which it is processed.
+                   (boolean value; default: false)
+plot_dos_color     Colour of the curves in the (integrated) density of states
+                   (IDOS/DOS) plots. The value must be a valid matplotlib
+                   colour. (default: blue)
+plot_dos_energies  Plot special energies, e.g., charge-neutrality point, Fermi
+                   energy/chemical potential at zero and finite density in
+                   density (DOS) plots. (boolean value; default: true)
+plot_dos_fill      Fill the area between the curve and zero in the DOS plot (not
+                   integrated DOS). (boolean value; default: false)
+plot_idos_fill     Fill the area between the curve and zero in the integrated
+                   DOS plot. (boolean value; default: false)
+plot_dos_units_negexp
+                   Use negative exponents in DOS units in density plots. If set
+                   to true, write "nm^-2" instead of "1/nm^2", for example.
+                   (boolean value; default: false)
+plot_dos_validity_range
+                   Shade the area in the (integrated) DOS plot where the value
+                   is expected to be incorrect due to missing data (due to
+                   momentum cutoff). (boolean value; default: true)
+plot_dos_vertical  Plot the (integrated) DOS sideways, so that energy is plotted
+                   on the vertical axis. The vertical scale will match the
+                   dispersion plot, so that these figures can be put
+                   side-by-side with a common axis. (boolean value; default:
+                   true)
+plot_ecnp          Plot the charge neutral energy as function of k or B. This is
+                   the boundary between "electron" and "hole" states (positive
+                   and negative band indices, respectively). (boolean value;
+                   default: false)
+plot_rasterize_pcolormesh
+                   Whether to rasterize plot elements created with pcolormesh
+                   from matplotlib. This is used primarily for two-dimensional
+                   color plots with kdotpy ll when one uses quadratic stepping
+                   for the magnetic field values. Rasterization leads to
+                   improved performance both in creating the plots as well as in
+                   rendering them with a pdf viewer. The resolution can be
+                   controlled with the matplotlibrc parameters 'figure.dpi' and
+                   'savefig.dpi'. If the old behaviour is desired, i.e., that
+                   the data is rendered as vector graphics, set the value of
+                   plot_rasterize_pcolormesh to 'false'. (boolean value;
+                   default: true)
+plot_rxy_hall_slope
+                   Plot the Hall slope R_xy = B / (n e), where B is magnetic
+                   field, n is density and e is electron charge, in the plots
+                   for R_xy (rxy-constdens.pdf) as a dashed line. (boolean
+                   value; default: true)
+plot_sdh_markers   Whether to show markers for the period of the SdH
+                   oscillations in the 'constdens' plot (both 'normal' and 'SdH'
+                   versions). The markers are placed at the values for which
+                   1 / B is a multiple of e / (2 pi hbar n), where n is the
+                   density. (boolean value; default: true)
+plot_sdh_markers_color
+                   Colour of the SdH markers in the 'constdens' plots. The value
+                   must be a valid matplotlib colour. (default: red)
+plot_sdh_scale_amount
+                   The maximum number of SdH oscillations to be shown in the SdH
+                   plot. If set to a nonzero value, the scale on the horizontal
+                   axis is magnified to this amount of SdH oscillations. The
+                   scale is never shrunk, so there may be fewer SdH oscillations
+                   on the axis. The 'constdens' plot linear in B is unaffected.
+                   If set to 0 (default), do not scale the axis. A typical
+                   useful nonzero value is 20.
+plot_transitions_labels
+                   Show some labels in transitions plot. (boolean value;
+                   default: true)
+plot_transitions_quantity
+                   Which quantity to use for colouring in the transitions plot.
+                   Possible choices:
+                     amplitude   'Raw' amplitude gamma from Fermi's golden rule
+                     rate        Transition rate density, n gamma (f2 - f1)
+                                 (default; alias: rate_density)
+                     occupancy   Occupancy difference f2 - f1
+                     deltae      Energy difference |E2 - E1| in meV
+                     freq        Corresponding frequency in THz (alias:
+                                 freq_thz)
+                     lambda      Corresponding wave length in µm (alias:
+                                 lambda_µm, lambda_um)
+                     absorption  Absorption (relative attenuation of intensity)
+                                 A
+plot_transitions_frequency_ticks
+                   Plot frequency ticks at the left and right energy axis for
+                   transitions plots. (boolean value; default: true)
+plot_transitions_max_absorption
+                   Upper limit of the colour scale in the transitions absorption
+                   plot. For the relative absorption, use [-value, value] as the
+                   colour range. (default value: 0.03)
+plot_wf_orbitals_realshift
+                   Phase-shift the orbital functions to purely real values
+                   before plotting. This results in a single line plot per
+                   orbital with consistent amplitudes and signs. The actual
+                   phases are still given at the right side of the figure. Uses
+                   straight/dashed lines for +/- angular momentum orbitals.
+                   (default: false)
+plot_wf_orbitals_order
+                   Order of the orbitals in the legend, for wave function plot
+                   style 'separate'. Possible choices:
+                     standard  Γ6,+1/2  Γ8,+1/2  Γ7,+1/2
+                               Γ6,-1/2  Γ8,-1/2  Γ7,-1/2
+                               Γ8,+3/2  Γ8,-3/2           (default)
+                     paired    Γ6,+1/2  Γ6,-1/2  Γ7,+1/2
+                               Γ8,+1/2  Γ8,-1/2  Γ7,-1/2
+                               Γ8,+3/2  Γ8,-3/2
+                     table              Γ8,+3/2
+                               Γ6,+1/2  Γ8,+1/2  Γ7,+1/2
+                               Γ6,-1/2  Γ8,-1/2  Γ7,-1/2
+                                        Γ8,-3/2
+                   For the six-orbital basis, the Γ7 states are omitted.
+plot_wf_mat_label_rot
+                   Rotation of material labels inside the layers (deg). Can be
+                   used to fit long labels in thin layers. (default: 0)
+plot_wf_mat_min_thick_label
+                   Minimum layer thickness as ratio of full stack, which is
+                   required to show the material label. (default: 0.15)
+plot_wf_zy_format  File format for wavefunction plots |psi(z, y)|^2. Possible
+                   choices:
+                     pdf       Multi-page PDF if possible, otherwise separate
+                               PDF files. (default)
+                     png       Separate PNG files.
+                     pngtopdf  Separate PNG files are converted and merged into
+                               a multi-page PDF. Requires the 'convert' command
+                               to be available. (alias: png_to_pdf)
+plot_wf_zy_bandcolors
+                   Colour model for the wavefunction plots |psi(z, y)|^2
+                   separated by bands. Possible choices:
+                     hsl   Hue-saturation-lightness. The colour (hue) is
+                           determined by the relative content of the bands, the
+                           saturation and lightness by the density.
+                     hsv   Hue-saturation-value. Like hsl, the colour (hue) is
+                           determined by the relative content of the bands, the
+                           saturation by the density, and the value is equal to
+                           1.
+                     rgb   Red-green-blue. The red, green, and blue channels are
+                           determined by the contents of the bands.
+                   NOTE: This is not a colormap! For the absolute value without
+                   band content, use the colormap set by 'color_wf_zy'.
+plot_wf_zy_scale   Scaling method (colour scale normalization) for wavefunction
+                   plots |psi(z, y)|^2. Possible choices:
+                     separate   Normalize the colour scale for each wavefunction
+                                individually. (default)
+                     together   Normalize the colour scale for all wavefunctions
+                                collectively.
+plot_wf_y_scale    Scaling method for the vertical axis for wave function plots
+                   |psi(y)|^2. Possible choices:
+                     size       Determine scale from sample size (width in y
+                                direction). (default; alias: width)
+                     magn       Determine scale from magnetic field. For small
+                                fields, use the sample size.
+                     separate   Determine scale from the maximum of each wave
+                                function individually.
+                     together   Determine scale from the maximum of all wave
+                                functions collectively.
+plot_wf_delete_png If the wavefunction plots are saved in PNG format and
+                   subsequently converted to a single multi-page PDF, delete the
+                   PNG files if the conversion is successful. (boolean value;
+                   default: true)
+plot_wf_together_num
+                   For the wavefunction plot in 'together' style, plot this many
+                   wave functions. Must be a positive integer. (default: 12)
+table_berry_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the Berry curvature csv files. (default: 4)
+table_data_label_style
+                   Style for expressing data labels in generic two-dimensional
+                   csv output, such as density of states and Berry curvature.
+                   The label is positioned at the end of the first row with
+                   data. Possible choices: none (alias: false), raw, plain,
+                   unicode, tex (see table_dispersion_unit_style below). If
+                   none, do not write a label. (default: plain)
+table_data_unit_style
+                   Style for expressing the unit in generic two-dimensional csv
+                   output. Possible choices: none (alias: false), raw, plain,
+                   unicode, tex (see table_dispersion_unit_style below). If
+                   none, do not write a unit. Also, if 'table_data_label_style'
+                   is set to none, this option is ignored and no unit is
+                   written. (default: plain)
+table_dos_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the density of states csv files. (default: 8)
+table_dos_scaling  Whether to apply density scaling for csv output of densities.
+                   If false, use the native units (nm^-2 in two dimensions).
+                   Otherwise, use the same scaling as for plots. (boolean value;
+                   default: false)
+table_dos_units_negexp
+                   Use negative exponents in DOS units for csv output. If set to
+                   true, write "nm^-2" instead of "1/nm^2", for example.
+                   (boolean value; default: false)
+table_dispersion_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the dispersion csv files. Energy and momentum values may
+                   use a different number of decimals. (minimum: 2, default: 5)
+table_dispersion_data_label
+                   Whether to include the observable at the end of the first
+                   data row in a multi-dimensional dispersion csv table (e.g.,
+                   with two or three momentum variables). (boolean value;
+                   default: true)
+table_dispersion_units
+                   Whether to include units of the variables and observables in
+                   dispersion csv files. For a one-dimensional dispersion, these
+                   are included as second header row. For a multi-dimensional
+                   dispersion, the unit is added at the end of the first data
+                   row. (boolean value; default: true)
+table_dispersion_unit_style
+                   Style for expressing units. Possible choices:
+                     raw       'Without' formatting
+                     plain     Plain-text formatting using common symbols (e.g.,
+                               square is ^2 and Greek letters are spelled out)
+                     unicode   Formatting using 'fancy' Unicode symbols (e.g.,
+                               square is the superscript-2 symbol and Greek
+                               letters use their corresponding Unicode symbol).
+                     tex       LaTeX formatting
+                   (default: plain)
+                   NOTE: Even with 'raw' or 'plain', there may still be some
+                   non-ASCII symbols, for example µ.
+table_dispersion_obs_style
+                   Style for expressing observables/quantities. Possible
+                   choices: raw, plain, unicode, tex (see above). (default: raw)
+table_qz_precision Precision (number of decimals) for floating point numbers,
+                   for the 'Q(z)' (z-dependent quantity) csv files. (default: 5)
+table_extrema_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the extrema csv files. (default: 5)
+table_absorption_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the absorption csv files (associated with transitions).
+                   (default: 5)
+table_transitions_precision
+                   Precision (number of decimals) for floating point numbers,
+                   for the transitions csv files. (default: 3)
+table_transitions_ratecoeff_unit
+                   Unit for the rate coefficient for optical transitions.
+                   (default: nm^2/mV/ns)
+table_wf_files     Which type of files should be written for the wave function
+                   data. Possible choices:
+                     none      No files are written.
+                     csv       Write csv files only. (default)
+                     tar       Write csv files, pack them into a tar file.
+                     targz     Write csv files, pack them into a gzipped tar
+                               file (compression level 6). (alias: gz, gzip,
+                               tar.gz)
+                     zip       Write csv files, pack them into a zip file with
+                               'deflate' compression.
+                     zipnozip  Write csv files, pack them into a zip file
+                               without compression.
+                   For the archive options (tar, zip, etc.), delete the csv
+                   files if the archive has been written successfully; otherwise
+                   keep the csv files. Some options may be unavailable depending
+                   on the installed Python modules.
+table_wf_precision Precision (number of decimals) for floating point numbers,
+                   for the wave function csv files. (default: 5)
+wf_locations_exact_match
+                   If set to true (default), the wave function locations should
+                   match the momentum/magnetic field values exactly. If no exact
+                   match is found, skip the location ('old behaviour'). If set
+                   to false, find the nearest value to each location. (boolean
+                   value; default: true)
+wf_locations_filename
+                   Whether to label the wave function files using the position
+                   (momentum/magnetic field). If set to false, label with
+                   numbers. (boolean value; default: true)
+xml_omit_default_config_values
+                   If set to true, do not save all configuration values to the
+                   XML output file, but only the ones that are set to a value
+                   different than the default value. Otherwise save all values
+                   (default); this is recommended for reproducibility.
+                   (boolean value; default: false)
+xml_shorten_command
+                   If set to true, replace the script path in the <cmdargs> tag
+                   by 'kdotpy xx' (where xx = 1d, 2d, etc.) if typing 'kdotpy'
+                   on the command line refers to the kdotpy main script. For
+                   this, the main script (or a link to it) must be in the PATH
+                   environment variable; this is generally the case if kdotpy
+                   has been installed with pip. (boolean value; default: false)
+
+
+NOTE: Valid colormaps are all matplotlib colormaps (see, e.g.,
+https://matplotlib.org/tutorials/colors/colormaps.html), plus the following ones
+defined by kdotpy itself:
+bluered      Designed for symmetric observables; blue gray red.
+grayred      Designed for positive observables; gray red, like the top half of
+             colormap 'bluered'
+yrbc         Designed for observables with range -3/2 to 3/2, an extension of
+             colormap 'bluered';
+             cyan (-3/2) blue (-1/2) gray (0) red (1/2) yellow (3/2)
+yrbc2        Slightly modified version of colormap 'yrbc'
+tab20alt     The set of colours from the matplotlib colormap 'tab20' but in a
+             different order. This can be used with 'indexed' colours in order
+             to match the 'indexedpm' colours if set to 'tab20'.
+tab21posneg  Another set of colours derived from the matplotlib colormap
+             'tab20', such that the values +n and -n correspond to brighter and
+             darker shades of the same colour. For 0, use black. This colormap
+             is especially designed for band indices.
+bluereddual  Designed for use as dual colormap; blue to bluish gray, reddish
+             gray to red.
+
+NOTE: For colormaps, multiple instances may be given by separating them with
+commas. The colormaps are tried in order. If the first one is not a valid
+colormap, then try the second, etc. This construct is to provide colormaps that
+do not exist in all versions of matplotlib or have changed names.
+
+NOTE: A custom colormap may be used using 'file:colormap.txt' where colormap.txt
+is a colormap file. A colormap file should be a simple text file, where each line
+is either a single matplotlib colour, or a combination of the form
+'value:color', where value is a number in the interval [0, 1]. The two forms may
+not be mixed in one file.
+A list of single colours creates a matplotlib colormap of type ListedColormap;
+this is useful for indexed colours, as no interpolation takes place. A list of
+'value:color' entries creates a LinearSegmentedColormap. This is a continuous
+colour gradient. Discontinuities can be achieved by listing the value twice,
+e.g., '0.5:#ff0000' and '0.5:#0000ff' on two subsequent lines.
+Please refer to:
+https://matplotlib.org/tutorials/colors/colormap-manipulation.html
+https://matplotlib.org/api/colors_api.html
+
+NOTE: Boolean configuration options may have the following values (not case
+sensitive):
+for "True": yes, y, true, t, 1, enabled, on
+for "False": no, n, false, f, 0, disabled, off
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/erange.py b/kdotpy-v1.0.0/src/kdotpy/erange.py
new file mode 100644
index 0000000000000000000000000000000000000000..2760871d712d75f444ec2422aac19180d2c1597a
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/erange.py
@@ -0,0 +1,166 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+def eres_automatic(erange, high_precision = False):
+	"""Determine energy resolution automatically
+
+	Arguments:
+	erange          Numpy array. The energy range is determined from the minimum
+	                and maximum value.
+	high_precision  True or False. If True, use a higher resolution.
+
+	Returns:
+	eres   Float. The resolution (step size) for the array of energies.
+	"""
+	emin = min(erange)
+	emax = max(erange)
+	esize = emax - emin
+	if esize < 0.1:
+		# Degenerate range: fall back to a fixed small step size.
+		sys.stderr.write("Warning (eres_automatic): Zero or tiny energy range.\n")
+		return 1e-3
+	# Candidate range sizes in ascending order; the first one that covers esize
+	# determines the step, yielding 2000 resp. 500 steps over that size.
+	for sz in [0.5, 20., 100., 500., 2000., 10000.]:
+		if esize <= sz:
+			return sz / 2000. if high_precision else sz / 500.
+	sys.stderr.write("Warning (eres_automatic): Extremely large energy range.\n")
+	return 100.
+
+def eres_from_target(erange, target):
+	"""Get energy resolution (step size) from target resolution or amount.
+	Allowed step sizes are 1, 2, 5 times a power of 10.
+
+	Arguments:
+	erange  List or array. Energy values from which the total interval (esize)
+	        is extracted.
+	target  Integer or float. If integer, the step size will be chosen such that
+	        the number of energy values exceeds this number. Values < 20 are
+	        treated as 20. If float, the step size will be <= than this number.
+
+	Returns:
+	eres    Float. Energy resolution (step size).
+	"""
+	emin = min(erange)
+	emax = max(erange)
+	esize = emax - emin
+	multipliers = [5, 2, 1]  # need to be in descending order
+	if isinstance(target, (int, np.integer)):
+		if target <= 0:
+			return eres_automatic(erange)
+		elif target < 20:
+			target = 20
+		for p10 in range(3, -8, -1):  # power of 10
+			for m in multipliers:
+				if (m * 10**p10) * target <= esize:
+					return m * 10**p10
+		return eres_automatic(erange)  # fallthrough
+	elif isinstance(target, (float, np.floating)):
+		if target <= 0:
+			return eres_automatic(esize)
+		for p10 in range(3, -8, -1):  # power of 10
+			for m in multipliers:
+				if (m * 10**p10) <= target:
+					return m * 10**p10
+		return eres_automatic(erange)  # fallthrough
+	else:
+		raise TypeError("Argument target must be integer of float.")
+
+def erange_from_target_eres(erange, target):
+	"""Wrapper around eres_from_target(). Returns energy range 3-tuple.
+
+	Arguments:
+	erange  None, 3-tuple, or list/array. If None, use the default range
+	        [-100, 100]. If a 3-tuple (emin, emax, eres), discard the third
+	        element and recompute the resolution from target.
+	target  Integer or float. See eres_from_target().
+
+	Returns:
+	3-tuple (emin, emax, eres) of floats.
+	"""
+	if erange is None:
+		erange = [-100, 100]
+	elif isinstance(erange, tuple) and len(erange) == 3:
+		erange = [erange[0], erange[1]]
+	eres = eres_from_target(erange, target)
+	return (erange[0], erange[1], eres)
+
+
+def get_erange(*args):
+	"""Get energy range, trying to avoid rounding errors.
+	This function avoids the rounding errors of np.arange(emin, emax, eres).
+
+	Arguments:
+	emin, emax, eres    Floats. Return an array from emin to emax in steps of
+	                    eres.
+	(emin, emax, eres)  Tuple of floats. Equivalent to previous.
+	arr                 Numpy array of dimension 1. Extract emin and emax as the
+	                    minimum and maximum of arr and return a uniformly spaced
+	                    array with the same length as arr.
+
+	Note:
+	If eres is None, get a value automatically using eres_automatic().
+
+	Returns:
+	erange   Numpy array of dimension 1 with uniform spacing between subsequent
+	         values.
+	"""
+	# TODO: Make class?
+	if len(args) == 3:
+		emin, emax, eres = args
+	elif len(args) == 1:
+		if isinstance(args[0], tuple) and len(args[0]) == 3:
+			emin, emax, eres = args[0]
+		elif isinstance(args[0], np.ndarray):
+			arr = args[0]
+			if arr.ndim != 1:
+				raise ValueError("Input array must be of dimension 1")
+			if len(arr) < 2:
+				raise ValueError("Input array must have at least 2 entries")
+			emin = arr.min()
+			emax = arr.max()
+			if len(arr) > 2:
+				if np.amin(np.diff(arr)) < 0:
+					sys.stderr.write("Warning (get_erange): Input array is not an increasing sequence\n.")
+				if np.amax(np.abs(np.diff(arr, n = 2))) > 1e-6:
+					sys.stderr.write("Warning (get_erange): Input array is not a uniformly spaced sequence\n.")
+			eres = (emax - emin) / (len(arr) - 1)
+		else:
+			raise TypeError("Argument should be a 3-tuple, an array, or 3 numbers.")
+	else:
+		raise TypeError("Argument should be a 3-tuple, an array, or 3 numbers.")
+	if eres is None:
+		eres = eres_automatic([emin, emax])
+	esize = emax - emin
+	ne = np.round(esize / eres)
+	if np.abs(ne * eres - esize) > 1e-6:
+		emax = emin + ne * eres  # make range commensurate with resolution
+	return np.linspace(emin, emax, int(ne) + 1)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/errorhandling.py b/kdotpy-v1.0.0/src/kdotpy/errorhandling.py
new file mode 100644
index 0000000000000000000000000000000000000000..d889f7bdfed0eb6f59a08d1f3e770b834ebf2dbf
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/errorhandling.py
@@ -0,0 +1,78 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from sys import stderr
+from .config import get_config_bool
+import traceback as tb
+
+
+class UnexpectedErrorHandler:
+	"""ContextManager for unexpected errors. If any unhandled error happens inside the with code block,
+	this Handler will catch it, may print a traceback or suppress the error, depending on config values.
+
+	Attributes:
+	message         Message to print to stdout in case of error (will always be displayed).
+					If None is given, use a default message.
+	do_handle       Tuple of errors to handle here. Default: (Exception,).
+	dont_handle     Tuple of errors which should not be handled here and are just reraised. Default: ().
+	"""
+	def __init__(self, message = None, do_handle = (Exception,), dont_handle = tuple()):
+		self.message = message if message is not None else "Warning: An unexpected error occurred."
+		self.do_handle = do_handle
+		self.dont_handle = dont_handle
+
+	def __enter__(self):
+		"""Enter the context. No setup is needed; returns None."""
+		pass  # Nothing to do here when entering the context
+
+	def __exit__(self, exc_type, exc_val, exc_tb):
+		"""Exit the context, deciding whether to suppress the exception.
+
+		Returning True suppresses the exception; returning None (falsy) lets it
+		propagate. Exceptions not matching do_handle, or matching dont_handle,
+		always propagate.
+		"""
+		if exc_type is not None and issubclass(exc_type, self.do_handle) and not issubclass(exc_type, self.dont_handle):
+			stderr.write(self.message)
+			if not get_config_bool('err_unexpected_ignore'):
+				stderr.write(" This error may be ignored with config 'err_unexpected_ignore = true'.\n")
+				# Hide the traceback if the error is going to be reraised anyway.
+				return
+			else:
+				if get_config_bool('err_unexpected_print_traceback'):
+					stderr.write("\nTraceback (hide with config 'err_unexpected_print_traceback'):\n")
+					tb.print_exc()
+				else:
+					stderr.write(" Show traceback with config 'err_unexpected_print_traceback = true'.\n")
+				# This prevents re-raising the error after leaving the context manager and code may continue as normal
+				return True
diff --git a/kdotpy-v1.0.0/src/kdotpy/etransform.py b/kdotpy-v1.0.0/src/kdotpy/etransform.py
new file mode 100644
index 0000000000000000000000000000000000000000..f734f8dcdda5dcc9f65b7f84dd8c9eff935279a2
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/etransform.py
@@ -0,0 +1,210 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+from .momentum import Vector, VectorGrid
+
+def get_xindex(xval, x):
+	"""Get index/indices for value(s) x in array xval
+
+	Arguments:
+	xval  Array or None. The values to search in. May be None only if x is
+	      also None.
+	x     None, float, or array. The value(s) to locate in xval. A match is
+	      accepted up to an absolute tolerance of 1e-6.
+
+	Returns:
+	None if x is None; a single index (int) if x is a float; an array of
+	indices if x is an array.
+
+	Raises:
+	ValueError if xval is None while x is not, or if any value in x is not
+	present in xval within the tolerance.
+	TypeError if x is of any other type.
+	"""
+	if x is None:
+		return None
+	if xval is None:
+		raise ValueError("x values are not defined")
+	if isinstance(x, (float, np.floating)):
+		xdiff = np.abs(xval - x)
+		if np.amin(xdiff) > 1e-6:
+			raise ValueError("x value not present")
+		return np.argmin(xdiff)
+	elif isinstance(x, np.ndarray):
+		# Pairwise distances; each x must be close to at least one xval entry.
+		xdiff = np.abs(xval[:, np.newaxis] - x[np.newaxis, :])
+		if np.amax(np.amin(xdiff, axis = 1)) > 1e-6:
+			raise ValueError("One or more x values not present")
+		return np.argmin(xdiff, axis = 1)
+	else:
+		raise TypeError("Argument x must be None, float, or array")
+
+class ETransform:
+	"""Object encoding transformation from energy to energy-dependent quantity
+	For example, the density value n(E) may be given as the array y_to; the array
+	e_from has the same size and should contain the corresponding energies.
+	If an energy ee does not appear in the array e_from, the result is
+	determined by linear interpolation.
+
+	Attributes:
+	e_from      Array or None. The source energies. In other words, the domain
+	            of the function n(E). This array can be 1- or 2-dimensional.
+	y_to        Array or None. This array should have the same shape as e_from.
+	            It encodes the values of n(E) (y values) as function of the
+	            values E in e_from.
+	xval        Array or None. An ETransform object may be defined at different
+	            values of momentum or magnetic field. In that case, the input
+	            arrays e_from and y_to must be 2-dimensional. If None, define a
+	            single (not x-dependent) transformation. Then, the input arrays
+	            must be 1-dimensional.
+	qstr        String or None. Quantity string, e.g., 'DOS'.
+	ustr        String or None. Unit string, e.g., 'nm^-2'.
+	plotrange   2-tuple or None. If a 2-tuple of floats, it indicates the
+	            vertical plot range for plots generated with this ETransform
+	            instance.
+	"""
+	def __init__(self, e_from, y_to, xval = None, qstr = None, ustr = None, plotrange = None):
+		self.e_from = None if e_from is None else np.array(e_from)
+		self.y_to = None if y_to is None else np.array(y_to)
+		if not self.e_from.shape[-1] == self.y_to.shape[-1]:
+			raise ValueError("Input arrays must have identical shapes at last axis.")
+		if xval is not None:
+			if not y_to.ndim == 2 and y_to.shape[0] == len(xval):
+				raise ValueError("Array xval has incorrect length")
+			self.xval = np.array(xval)
+		else:
+			self.xval = None
+		self.qstr = qstr
+		self.ustr = ustr
+		self.plotrange = plotrange
+
+	def min(self, ee = None):
+		"""Get minimum y value in an array or in the transformation range.
+
+		Arguments:
+		ee   Array, float or None. If an array or float, transform to the
+		     corresponding y values and return its minimum. If None, return the
+		     minimum y value of the transformation range.
+		"""
+		if ee is None:
+			return self.y_to.min()
+		else:
+			yvals = self.apply(ee)
+			minval = yvals if isinstance(yvals, (float, np.floating)) else yvals.min()
+			return max(minval, self.y_to.min()) if minval == minval else self.y_to.min()
+
+	def max(self, ee = None):
+		"""Get maximum y value in an array or in the transformation range.
+
+		Arguments:
+		ee   Array, float or None. If an array or float, transform to the
+		     corresponding y values and return its maximum. If None, return the
+		     maximum y value of the transformation range.
+		"""
+		if ee is None:
+			return self.y_to.max()
+		else:
+			yvals = self.apply(ee)
+			maxval = yvals if isinstance(yvals, (float, np.floating)) else yvals.max()
+			return min(maxval, self.y_to.max()) if maxval == maxval else self.y_to.max()
+
+	def apply(self, ee, at_x = None):
+		"""Apply transformation.
+
+		Arguments:
+		ee    Array or float. Transform these value to the corresponding y
+		      values. Interpolation is performed for the values not in
+		      self.e_from. The array ee must be 1- or 2-dimensional. If this
+		      instance contains x values (i.e., with y_to being 2-dimensional)
+		      the first axis of ee must have length equal to 1 or to the number
+		      of x values. In other words, an 1-dim array that should not be
+		      treated as x dependent can be entered as [array].
+		at_x  Array, float or None. If an array or float, evaluate at these x
+		      values.
+
+		Returns:
+		ee_tfm   Float or array. The transformed values.
+		"""
+		nan = float("nan")
+		if isinstance(at_x, np.ndarray):
+			if at_x.shape[0] == 1:
+				at_x = at_x[0]
+			elif self.y_to.ndim == 2 and at_x.shape[0] != self.y_to.shape[0]:
+				raise ValueError("If at_x is specified as array, it must have equal length to the y_to array.")
+		elif isinstance(at_x, VectorGrid):
+			raise TypeError("Argument at_x may not be a VectorGrid instance. Extract and pass an array with the appropriate values.")
+		at_x_vector = isinstance(at_x, Vector) or (isinstance(at_x, np.ndarray) and len(at_x) > 0 and isinstance(at_x[0], Vector))
+		xval_vector = isinstance(self.xval, np.ndarray) and len(self.xval) > 0 and isinstance(self.xval[0], Vector)
+		if xval_vector and not at_x_vector:
+			xval = np.array([x.component(None) for x in self.xval])
+		else:
+			xval = self.xval
+
+		if self.e_from is None or self.y_to is None:
+			return ee
+
+		if not isinstance(ee, (float, np.floating, tuple, list, np.ndarray)):
+			raise TypeError("Input must be a float or an array/list/tuple")
+		ee = np.asarray(ee)
+		if ee.ndim not in [0, 1, 2]:
+			raise ValueError("Input value ee must have dimension 0, 1, or 2")
+
+		if self.y_to.ndim == 1:
+			if ee.ndim == 0 or ee.ndim == 1:
+				return np.interp(ee, self.e_from, self.y_to, left = nan, right = nan)
+			elif ee.ndim == 2:
+				if ee.shape[0] == 1:
+					return np.interp(ee[0], self.e_from, self.y_to, left = nan, right = nan)
+				else:
+					return np.array([np.interp(ee1, self.e_from, self.y_to, left = nan, right = nan) for ee1 in ee])
+		elif self.y_to.ndim == 2:
+			if ee.ndim == 0:
+				idx = get_xindex(xval, at_x)
+				if isinstance(at_x, (float, np.floating)):
+					return np.interp(ee, self.e_from, self.y_to[idx], left = nan, right = nan)
+				elif isinstance(at_x, np.ndarray):
+					return np.array([np.interp(ee, self.e_from, self.y_to[i], left = nan, right = nan) for i in idx])
+				else:
+					return np.array([np.interp(ee, self.e_from, y_to1, left = nan, right = nan) for y_to1 in self.y_to])
+			elif ee.shape[0] == 1:  # ee.ndim = 1, 2
+				idx = get_xindex(xval, at_x)
+				if isinstance(at_x, (float, np.floating)):
+					return np.interp(ee[0], self.e_from, self.y_to[idx], left = nan, right = nan)
+				elif isinstance(at_x, np.ndarray):
+					return np.array([np.interp(ee[0], self.e_from, self.y_to[i], left = nan, right = nan) for i in idx])
+				else:
+					return np.array([np.interp(ee[0], self.e_from, y_to1, left = nan, right = nan) for y_to1 in self.y_to])
+			elif ee.shape[0] == self.y_to.shape[0]:  # ee.ndim = 1, 2
+				idx = get_xindex(xval, at_x)
+				if isinstance(at_x, (float, np.floating)):
+					return np.interp(ee[idx], self.e_from, self.y_to[idx], left = nan, right = nan)
+				elif isinstance(at_x, np.ndarray):
+					return np.array([np.interp(ee[i], self.e_from, self.y_to[i], left = nan, right = nan) for i in idx])
+				else:
+					return np.array([np.interp(ee1, self.e_from, y_to1, left = nan, right = nan) for ee1, y_to1 in zip(ee, self.y_to)])
+			else:
+				raise ValueError("Argument ee and array y_to have incompatible shapes %s and %s." % (ee.shape, self.y_to.shape))
+		else:
+			raise TypeError("Invalid dimensionality of y arrays")
diff --git a/kdotpy-v1.0.0/src/kdotpy/extrema.py b/kdotpy-v1.0.0/src/kdotpy/extrema.py
new file mode 100644
index 0000000000000000000000000000000000000000..223a60bcce0753e0972bab47241dd6b372e7e324
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/extrema.py
@@ -0,0 +1,1600 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import numpy.linalg as nplin
+import sys
+
+from .physconst import hbarm0
+from .momentum import Vector, VectorGrid, vectorgrid_from_components, to_polar, to_spherical
+
+
+class BandExtremum:
+	"""Container class for a single band extremum
+
+	Attributes:
+	bindex    Integer. Band index.
+	llindex   Integer or None. LL index.
+	char      String. Character of the band.
+	minmax    String. 'min' or 'max'
+	k         Vector. Momentum of magnetic field value.
+	energy    Float. Energy value of the extremum.
+	invmass   Float or tuple. Inverse mass, i.e., the value(s) of the second
+	          derivative.
+	"""
+	def __init__(self, minmax, k, energy, invmass, bindex = None, llindex = None, char = None):
+		self.bindex = bindex
+		self.llindex = llindex
+		self.char = char
+		if not isinstance(minmax, str):
+			raise TypeError
+		if not isinstance(k, (Vector, tuple)):
+			raise TypeError
+		if not isinstance(energy, (float, np.floating)):
+			raise TypeError
+		self.minmax = minmax
+		self.k = k
+		self.energy = energy
+		# Convert second derivative(s) to effective mass(es), presumably in
+		# units of m0 via the constant hbarm0; NaN marks zero curvature.
+		if isinstance(invmass, (float, np.floating, int, np.integer)):
+			self.mass = float("nan") if invmass == 0.0 else -hbarm0 / invmass
+		elif isinstance(invmass, (tuple, list, np.ndarray)):
+			self.mass = tuple(float("nan") if im == 0.0 else -hbarm0 / im for im in invmass)
+		else:
+			raise TypeError("Argument invmass must be numeric or a list/tuple/array")
+
+	def __str__(self):
+		# Scalar mass prints as a single number, tuple as "(m1, m2, ...)".
+		mstr = ("%6.3f" % self.mass) if isinstance(self.mass, (float, np.floating)) else ("(" + ", ". join(["%6.3f" % m for m in self.mass]) + ")")
+		return "(%s, k = %s nm^-1, E = %9.3f meV, m_kx/m_0 = %s)" % (self.minmax, self.k, self.energy, mstr)
+
+	def todict(self):
+		# Optional attributes are included only when set.
+		data_dict = {'k': self.k, 'E': self.energy, 'mass': self.mass, 'minmax': self.minmax}
+		if self.bindex is not None:
+			data_dict['bindex'] = self.bindex
+		if self.llindex is not None:
+			data_dict['llindex'] = self.llindex
+		if self.char is not None:
+			data_dict['char'] = self.char
+		return data_dict
+
+	def vectorize_momentum(self, var, constval, const, **kwds):
+		"""Replace numerical or tuple momentum value by appropriate Vector instance"""
+		if isinstance(self.k, Vector):  # Pass silently if already a Vector
+			return
+		grid = vectorgrid_from_components(self.k, var, constval, const, **kwds)
+		self.k = grid[0]
+		self.k = grid[0]
+
+def band_minima_maxima(data, do_print = True):
+	"""Get global band minima and maxima.
+
+	Arguments:
+	data          DiagData instance
+	do_print      True or False. Whether to print the results to stdout.
+
+	Returns:
+	bands_minima  Dict instance of the form {b: e_min, ...}, where b is the band
+	              index and e_min is its global minimum value.
+	bands_maxima  Dict instance of the form {b: e_max, ...}, where b is the band
+	              index and e_max is its global maximum value.
+	"""
+	if len(data) <= 1:
+		sys.stderr.write("Warning (band_minima_maxima): Insufficient dispersion data.\n")
+		return None, None
+	if len(data.shape) > 1:
+		sys.stderr.write("Warning (band_minima_maxima): Not implemented for data point grids of dim > 1.\n")
+		return None, None
+
+	data_k0 = data.get_zero_point()
+	if data_k0 is None:
+		sys.stderr.write("Warning (band_local_extrema): Zero momentum not included in data. Minima and maxima at zero momentum may be missed.\n")
+		data_k0 = data.get_base_point()  # Take base point instead
+
+	if data_k0.bindex is None:
+		sys.stderr.write("ERROR (band_minima_maxima): Band indices are needed for extremal-value calculation, but they are missing.\n")
+		return None, None
+
+	bands_minima = {}
+	bands_maxima = {}
+	b_idx_min = 0
+	b_idx_max = 0
+	for d in data:
+		for e, b in zip(d.eival, d.bindex):
+			if b not in bands_minima:
+				bands_minima[b] = e
+				bands_maxima[b] = e
+				b_idx_min = min(b_idx_min, b)
+				b_idx_max = max(b_idx_max, b)
+			else:
+				bands_minima[b] = min(bands_minima[b], e)
+				bands_maxima[b] = max(bands_maxima[b], e)
+
+	if do_print:
+		print("Bands minima and maxima:")
+		print("                   Min           Zero          Max")
+
+		for b in range(b_idx_max, b_idx_min - 1, -1):
+			if b not in bands_minima or b not in data_k0.bindex:
+				continue
+			bt = None
+			if data_k0 is not None and data_k0.char is not None:
+				bt = data_k0.get_char((b,))
+			e0 = data_k0.get_eival((b,))
+			if b in bands_minima:
+				if bt is None:
+					print("Band %3i       : [%8.3f meV, %8.3f meV, %8.3f meV]" % (b, bands_minima[b], e0, bands_maxima[b]))
+				else:
+					print("Band %3i (%-4s): [%8.3f meV, %8.3f meV, %8.3f meV]" % (b, bt, bands_minima[b], e0, bands_maxima[b]))
+		print()
+	return bands_minima, bands_maxima
+
+
+### MULTIPOINT EXTREMUM SOLVERS ###
+# Solve the variables f0, x0, c from the equation f(x) = f0 + c (x-x0)^2
+def three_point_extremum_solver(x, fx):
+	"""Three point extremum solver (1D)
+	Solve the variables f0, x0, c from the equation f(x) = f0 + c (x-x0)^2
+
+	Arguments:
+	x   List or array of length 3. The x values. They must be equally spaced.
+	fx  List or array of length 3. The values f(x).
+
+	Returns:
+	f0  Float. Function value at extremum.
+	x0  Float. x value at extremum.
+	c   Float. Coefficient of the quadratic term.
+	"""
+	# TODO: Assume x values are equally spaced
+	if not (isinstance(x, (list, np.ndarray)) and len(x) == 3):
+		raise TypeError
+	if not (isinstance(fx, (list, np.ndarray)) and len(fx) == 3):
+		raise TypeError
+	# c = f''(x)/2 from the central second-order finite difference.
+	c = (fx[2] - 2 * fx[1] + fx[0]) / (x[1] - x[0])**2 / 2
+	# Vertex position from the central first derivative at x[1].
+	x0 = x[1] - (fx[2] - fx[0]) / (x[2] - x[0]) / 2 / c
+	f0 = fx[1] - c * (x[1] - x0)**2
+	return f0, x0, c
+
+
+display_nine_point_warning = True
+def nine_point_extremum_solver(xy, fxy):
+	"""Nine point extremum solver (2D)
+	Solve the variables f0, (x0, y0), (a, b, c) from the equation
+	  f(x,y) = f0 + a (x-x0)^2 + b (y-y0)^2 + c (x-x0) (y-y0)
+
+	Note:
+	We have 9 input variables and only 6 unknowns. The values used for the
+	solution are f(0,0), f(dx,0), f(-dx,0), f(0,dy), f(0,-dy) and
+	f(dx,dy) - f(-dx,dy) - f(dx,-dy) + f(-dx,-dy). The result is thus inexact
+	at the four corner points. It is also not equivalent to a least-squares fit.
+
+	Arguments:
+	xy   Array of shape (3, 3). The (x, y) values. They must be arranged on an
+	     equally spaced 3x3 grid.
+	fxy  Array of shape (3, 3). The values f(x, y).
+
+	Returns:
+	f0           Float. Function value at extremum.
+	(x0, y0)     Float. (x, y) value at extremum.
+	(a, b, c)    Float. Coefficients of the quadratic terms.
+	"""
+	# TODO: Assume x and y values are equally spaced and that xy is a cartesian grid
+	xy = np.asarray(xy)
+	if not xy.shape == (3, 3, 2):
+		raise TypeError
+	fxy = np.asarray(fxy)
+	if not fxy.shape == (3, 3):
+		raise TypeError
+
+	# Axis values extracted from the cartesian grid of (x, y) pairs.
+	x = xy[:, 0, 0]
+	y = xy[0, :, 1]
+
+	dx = (x[1] - x[0])
+	dy = (y[1] - y[0])
+	if dx == 0 or dy == 0:
+		raise ValueError("Singular xy data")
+
+	# Central finite differences: a, b are halved second derivatives,
+	# c the mixed derivative, X, Y the first derivatives at the center.
+	a = (fxy[2, 1] - 2 * fxy[1, 1] + fxy[0, 1]) / dx**2 / 2
+	b = (fxy[1, 2] - 2 * fxy[1, 1] + fxy[1, 0]) / dy**2 / 2
+	c = (fxy[2, 2] - fxy[2, 0] - fxy[0, 2] + fxy[0, 0]) / dx / dy / 4
+	X = (fxy[2, 1] - fxy[0, 1]) / dx / 2
+	Y = (fxy[1, 2] - fxy[1, 0]) / dy / 2
+	x0 = x[1] + (c * Y - 2 * b * X) / (4 * a * b - c**2)  # denominator is det(Hessian)
+	y0 = y[1] + (c * X - 2 * a * Y) / (4 * a * b - c**2)  # denominator is det(Hessian)
+	f0 = fxy[1, 1] - a * (x[1] - x0)**2 - b * (y[1] - y0)**2 - c * (x[1] - x0) * (y[1] - y0)
+
+	# If the extremum falls outside the 3x3 cell, the quadratic model is
+	# unreliable; retry with the mixed term dropped (c = 0).
+	if x0 < x[0] or x0 > x[-1] or y0 < y[0] or y0 > y[-1]:
+		if display_nine_point_warning:
+			sys.stderr.write("Warning (nine_point_extremum_solver): Poorly defined extremum found at approx. (x, y) = (%.3f, %.3f), f = %.3f.\n" % (x[1], y[1], fxy[1][1]))
+		# set c = 0
+		x0 = x[1] - X / (2 * a)
+		y0 = y[1] - Y / (2 * b)
+		f0 = fxy[1, 1] - a * (x[1] - x0)**2 - b * (y[1] - y0)**2 - c * (x[1] - x0) * (y[1] - y0)
+		return f0, (x0, y0), (a, b, c)
+	else:
+		return f0, (x0, y0), (a, b, c)
+
+
+display_nineteen_point_warning = True
+def nineteen_point_extremum_solver(xyz, fxyz):
+	"""Nineteen point extremum solver (3D)
+	Solve the variables f0, (x0, y0, z0), (a, b, c, d, e, f) from the equation
+	                             [ 2a  d  e ] [x1]
+	  f(x,y,z) = f0 + [x1 y1 z1] [  d 2b  f ] [y1],
+	                             [  e  f 2c ] [z1]
+	where x1 = x - x0, y1 = y - y0 and z1 = z - z0. The 3x3 matrix in this
+	equation is the Hessian matrix.
+
+	Note:
+	The input array is a 3x3x3 grid, but the values at the 8 corner points are
+	not considered. We have 19 remaining input variables and 10 unknowns. The
+	result is thus inexact at the some of the points.
+
+	Arguments:
+	xyz   Array of shape (3, 3, 3). The (x, y, z) values. They must be arranged
+	      on an equally spaced 3x3x3 grid.
+	fxyz  Array of shape (3, 3, 3). The values f(x, y, z).
+
+	Returns:
+	f0                   Float. Function value at extremum.
+	(x0, y0, z0)         Floats. (x, y, z) value at extremum.
+	(a, b, c, d, e, f)   Floats. Coefficients of the quadratic terms (that
+	                     defines the Hessian matrix).
+	"""
+	# TODO: Assume x, y, z values are equally spaced and that xyz is a cartesian grid
+	xyz = np.asarray(xyz)
+	if not xyz.shape == (3, 3, 3, 3):
+		raise TypeError
+	fxyz = np.asarray(fxyz)
+	if not fxyz.shape == (3, 3, 3):
+		raise TypeError
+
+	# Axis values extracted from the cartesian grid of (x, y, z) triples.
+	x = xyz[:, 0, 0, 0]
+	y = xyz[0, :, 0, 1]
+	z = xyz[0, 0, :, 2]
+
+	dx = (x[1] - x[0])
+	dy = (y[1] - y[0])
+	dz = (z[1] - z[0])
+	if dx == 0 or dy == 0 or dz == 0:
+		raise ValueError("Singular xyz data")
+
+	# Central finite differences: a, b, c are halved second derivatives;
+	# d, e, f mixed derivatives; X, Y, Z first derivatives at the center.
+	a = (fxyz[2, 1, 1] - 2 * fxyz[1, 1, 1] + fxyz[0, 1, 1]) / dx**2 / 2
+	b = (fxyz[1, 2, 1] - 2 * fxyz[1, 1, 1] + fxyz[1, 0, 1]) / dy**2 / 2
+	c = (fxyz[1, 1, 2] - 2 * fxyz[1, 1, 1] + fxyz[1, 1, 0]) / dz**2 / 2
+	d = (fxyz[2, 2, 1] - fxyz[2, 0, 1] - fxyz[0, 2, 1] + fxyz[0, 0, 1]) / dx / dy / 4
+	e = (fxyz[2, 1, 2] - fxyz[2, 1, 0] - fxyz[0, 1, 2] + fxyz[0, 1, 0]) / dx / dz / 4
+	f = (fxyz[1, 2, 2] - fxyz[1, 2, 0] - fxyz[1, 0, 2] + fxyz[1, 0, 0]) / dy / dz / 4
+	X = (fxyz[2, 1, 1] - fxyz[0, 1, 1]) / dx / 2
+	Y = (fxyz[1, 2, 1] - fxyz[1, 0, 1]) / dy / 2
+	Z = (fxyz[1, 1, 2] - fxyz[1, 1, 0]) / dz / 2
+	hessian = np.array([[2 * a, d, e], [d, 2 * b, f], [e, f, 2 * c]])
+	detH = nplin.det(hessian)
+	if abs(detH) > 1e-6:
+		# Solve Hessian @ (r0 - r_center) = -gradient for the extremum position.
+		x0, y0, z0 = np.array([x[1], y[1], z[1]]) + nplin.inv(hessian) @ np.array([-X, -Y, -Z])
+		f0 = fxyz[1, 1, 1] - a * (x[1] - x0)**2 - b * (y[1] - y0)**2 - c * (z[1] - z0)**2 - d * (x[1] - x0) * (y[1] - y0) - e * (x[1] - x0) * (z[1] - z0) - f * (y[1] - y0) * (z[1] - z0)
+
+	# Near-singular Hessian or extremum outside the cell: fall back to the
+	# separable model with the mixed terms dropped (d = e = f = 0).
+	# (Short-circuit 'or' guarantees x0, y0, z0 are defined when compared.)
+	if abs(detH) <= 1e-6 or x0 < x[0] or x0 > x[-1] or y0 < y[0] or y0 > y[-1] or z0 < z[0] or z0 > z[-1]:
+		if display_nineteen_point_warning:
+			sys.stderr.write("Warning (nineteen_point_extremum_solver): Poorly defined extremum found at approx. (x, y, z) = (%.3f, %.3f, %.3f), f = %.3f.\n" % (x[1], y[1], z[1], fxyz[1][1][1]))
+		# set d = e = f = 0
+		x0 = x[1] - X / (2 * a)
+		y0 = y[1] - Y / (2 * b)
+		z0 = z[1] - Z / (2 * c)
+		f0 = fxyz[1, 1, 1] - a * (x[1] - x0)**2 - b * (y[1] - y0)**2 - c * (z[1] - z0)**2 - d * (x[1] - x0) * (y[1] - y0) - e * (x[1] - x0) * (z[1] - z0) - f * (y[1] - y0) * (z[1] - z0)
+		return f0, (x0, y0, z0), (a, b, c, d, e, f)
+	else:
+		return f0, (x0, y0, z0), (a, b, c, d, e, f)
+
def invmasses_2d_from_abc(*arg, polar = False, degrees = True, xy = None):
	"""Calculate the two inverse masses from the values (a, b, c) obtained from the nine-point equation solver.

	The argument may be a, b, c or the tuple (a, b, c).
	The input values refer to the equation f(x,y) = a x^2 + b y^2 + c x y.
	The returned values are half the eigenvalues of the Hessian matrix
	  [ 2a   c ]
	  [  c  2b ]
	i.e., the coefficients of the quadratic form along its principal axes.
	(Consistent with invmasses_3d_from_abcdef, which returns 0.5 * eival.)

	Arguments:
	arg      Floats a, b, c or a single 3-tuple (a, b, c).
	polar    True or False. If True, interpret the coordinates as polar and
	         rescale b and c, using r dphi as differential for the second
	         coordinate.
	degrees  True or False. Whether the angular coordinate is in degrees. Only
	         used if polar is True.
	xy       Tuple or array. The (r, phi) value where the coefficients were
	         obtained; only xy[0] (the radius r) is used. Required if polar is
	         True.

	Returns:
	Two floats, or two complex numbers if the discriminant is negative.

	Raises:
	TypeError  If the positional arguments are not a, b, c or (a, b, c).
	"""
	# arg from *arg is always a tuple; only its length/content needs checking
	if len(arg) == 3:
		a, b, c = arg
	elif len(arg) == 1 and isinstance(arg[0], tuple) and len(arg[0]) == 3:
		a, b, c = arg[0]
	else:
		raise TypeError("Argument must be a, b, c or the tuple (a, b, c)")
	if polar:
		# Interpret as polar coordinates. Use r dphi as differential for the
		# second coordinate.
		degmult = 180. / np.pi if degrees else 1  # conversion radians to degrees
		mult = degmult / xy[0]  # 1 / r
		b *= mult**2
		c *= mult

	# Discriminant of the 2x2 eigenvalue problem. For real a, b, c one has
	# q >= 0 always; the complex branch is kept as a safeguard only.
	q = (a - b)**2 + c**2
	if q >= 0.0:
		return 0.5 * (a + b + np.sqrt(q)), 0.5 * (a + b - np.sqrt(q))
	else:
		return 0.5 * (a + b + 1.j * np.sqrt(-q)), 0.5 * (a + b - 1.j * np.sqrt(-q))
+
def invmasses_3d_from_abcdef(*arg, cylindrical = False, spherical = False, degrees = True, xyz = None):
	"""Calculate the three inverse masses from the values (a, b, c, d, e, f) obtained from the nineteen-point equation solver.

	The argument may be a, b, c, d, e, f or the tuple (a, b, c, d, e, f).
	These values define the Hessian matrix, see
	nineteen_point_extremum_solver(). The returned values are half the
	eigenvalues of the Hessian matrix, i.e., the coefficients of the quadratic
	form along its principal axes.

	Arguments:
	arg          Floats a, b, c, d, e, f or a single 6-tuple.
	cylindrical  True or False. If True, interpret the coordinates as
	             cylindrical (r, phi, z) and rescale the phi components of the
	             Hessian by 1 / r.
	spherical    True or False. If True, interpret the coordinates as spherical
	             (r, theta, phi) and rescale the angular components by 1 / r
	             and 1 / (r sin theta). (If cylindrical is True, this argument
	             is ignored.)
	degrees      True or False. Whether angular coordinates are in degrees.
	xyz          Tuple or array. The coordinate value where the coefficients
	             were obtained. Required if cylindrical or spherical is True.

	Returns:
	Three floats.

	Raises:
	TypeError  If the positional arguments are not a, ..., f or (a, ..., f).
	"""
	# arg from *arg is always a tuple; only its length/content needs checking
	if len(arg) == 6:
		a, b, c, d, e, f = arg
	elif len(arg) == 1 and isinstance(arg[0], tuple) and len(arg[0]) == 6:
		a, b, c, d, e, f = arg[0]
	else:
		raise TypeError("Argument must be a, b, c, d, e, f or the tuple (a, b, c, d, e, f)")

	hessian = np.array([[2 * a, d, e], [d, 2 * b, f], [e, f, 2 * c]])
	if cylindrical:
		degmult = 180. / np.pi if degrees else 1  # conversion radians to degrees
		phimult = degmult / xyz[0]  # 1 / r
		multmat = np.diag([1.0, phimult, 1.0])
		hessian = multmat @ hessian @ multmat
	elif spherical:
		degmult = 180. / np.pi if degrees else 1  # conversion radians to degrees
		thetamult = degmult / xyz[0]  # 1 / r
		phimult = degmult / xyz[0] / np.sin(xyz[1] / degmult)  # 1 / r sin(theta)
		multmat = np.diag([1.0, thetamult, phimult])
		hessian = multmat @ hessian @ multmat
	eival, eivec = nplin.eigh(hessian)
	# Put eigenvalues in correct order if they correspond to the unit vectors:
	# each eigenvector is scored by |v| . (1, 10, 100), so that eigenvectors
	# (approximately) along the x, y, z axes sort first, second, and third.
	order = np.argsort(np.array(np.abs(eivec.T)) @ np.array([1, 10, 100]))
	return tuple(0.5 * eival[order])
+
def local_extrema_1d(x, fx, accuracy = 0.0, three_point = True, extend = None):
	"""Find local extrema in 1D.
	Use a crude algorithm by comparing neighbouring values f(x - dx), f(x), and
	f(x + dx), and use the multi-point extremum solver to get a result to higher
	accuracy. The input array may be extended as to find extrema at the edge.
	This is useful, for example, at k = 0.

	Arguments:
	x            Array. Values for x.
	fx           Array. Values for f(x).
	accuracy     Float. If the values f(x - dx), f(x), f(x + dx) are no more
	             than this value apart, do not consider an extremum at this
	             point.
	three_point  True or False. Whether to use the three-point extremum solver.
	extend       List of length 2. If the elements are True or False, whether to
	             extend the grid at lower and upper end. If the elements are
	             floats, only extend if this value matches the value of x at the
	             lower or upper end. The values 0 and 'auto' are equivalent to
	             [0.0, 0.0]. The values None and False mean no extension.

	Returns:
	List of BandExtremum instances.
	"""
	x = np.asarray(x)
	fx = np.asarray(fx)

	# Apply grid extension. Note: extend is False must be tested explicitly
	# before the comparison to 0, because False == 0 evaluates to True in
	# Python, whereas False should mean "no extension".
	if extend is True:
		extend = [True, True]
	elif extend is False:
		extend = None
	elif extend == 0 or extend == 'auto':
		extend = [0.0, 0.0]
	if isinstance(extend, list) and len(extend) == 2:
		# Float entries: extend only if the grid endpoint matches that value
		if isinstance(extend[0], (float, np.floating)):
			extend[0] = (abs(x[0] - extend[0]) < 1e-10)
		if isinstance(extend[1], (float, np.floating)):
			extend[1] = (abs(x[-1] - extend[1]) < 1e-10)

		# Extend x by reflection and copy the neighbouring f value (even extension)
		if extend[0]:
			x = np.concatenate((2 * x[:1] - x[1:2], x))
			fx = np.concatenate((fx[1:2], fx))
		if extend[1]:
			x = np.concatenate((x, 2 * x[-1:] - x[-2:-1]))
			fx = np.concatenate((fx, fx[-2:-1]))
	jx = np.arange(0, len(x))  # index array

	# Minima
	ex_min = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		rb = fx[2:] > fx[1:-1] + accuracy   # value to right bigger
		lb = fx[:-2] > fx[1:-1] + accuracy  # value to left bigger
	# Apply three-point interpolator
	if three_point:
		jmin = jx[1:-1][lb & rb]
		for j in jmin:
			ff, xx, cc = three_point_extremum_solver(x[j-1:j+2], fx[j-1:j+2])
			ex_min.append(BandExtremum("min", (xx,), ff, cc))
	else:
		xmin = x[1:-1][lb & rb]
		fmin = fx[1:-1][lb & rb]
		ex_min = [BandExtremum("min", (xx,), ff, 0.0) for ff, xx in zip(fmin, xmin)]

	# Maxima
	ex_max = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		rs = fx[2:] < fx[1:-1] - accuracy   # value to right smaller
		ls = fx[:-2] < fx[1:-1] - accuracy  # value to left smaller
	# Apply three-point interpolator
	if three_point:
		jmax = jx[1:-1][ls & rs]
		for j in jmax:
			fmax, xmax, cmax = three_point_extremum_solver(x[j-1:j+2], fx[j-1:j+2])
			ex_max.append(BandExtremum("max", (xmax,), fmax, cmax))
	else:
		xmax = x[1:-1][ls & rs]
		fmax = fx[1:-1][ls & rs]
		ex_max = [BandExtremum("max", (xx,), ff, 0.0) for ff, xx in zip(fmax, xmax)]

	return ex_min + ex_max
+
def local_extrema_2d(xy, fxy, accuracy = 0.0, nine_point = True, extend = None, polar = False, degrees = True):
	"""Find local extrema in 2D.
	Use a crude algorithm by comparing the function at neighbouring values of
	(x, y). Use the multi-point extremum solver to get a result to higher
	accuracy. The input array may be extended as to find extrema at the edge.
	This is useful, for example, at k = 0.

	Arguments:
	xy           Array. Values for (x, y).
	fxy          Array. Values for f(x, y).
	accuracy     Float. If the values of the function at neighbouring points are
	             no more than this value apart, do not consider an extremum at
	             this point.
	nine_point   True or False. Whether to use the nine-point extremum solver.
	extend       List of length 4. If the elements are True or False, whether to
	             extend the grid at the four edges of the grid. If the elements
	             are floats, only extend if this value matches the value of x or
	             y at the corresponding edge. The values 0 and 'auto' are
	             equivalent to [0.0, 0.0, 0.0, 0.0]. The values None and False
	             mean no extension.
	polar        True or False. Whether to use polar coordinates.
	degrees      True or False. Whether angular coordinates are in degrees.

	Returns:
	List of BandExtremum instances.
	"""
	xy = np.asarray(xy)
	fxy = np.asarray(fxy)

	# TODO: Test that xy is a grid
	x = xy[:, 0, 0]
	y = xy[0, :, 1]

	# Apply grid extension. Note: extend is False must be tested explicitly
	# before the comparison to 0, because False == 0 evaluates to True in
	# Python, whereas False should mean "no extension".
	if extend is True:
		extend = [True, True, True, True]
	elif extend is False:
		extend = None
	elif extend == 0 or extend == 'auto':
		extend = [0.0, 0.0, 0.0, 0.0]
	if isinstance(extend, list) and len(extend) == 4:
		# Float entries: extend only if the grid edge matches that value
		if isinstance(extend[0], (float, np.floating)):
			extend[0] = (abs(x[0] - extend[0]) < 1e-10)
		if isinstance(extend[1], (float, np.floating)):
			extend[1] = (abs(x[-1] - extend[1]) < 1e-10)
		if isinstance(extend[2], (float, np.floating)):
			extend[2] = (abs(y[0] - extend[2]) < 1e-10)
		if isinstance(extend[3], (float, np.floating)):
			extend[3] = (abs(y[-1] - extend[3]) < 1e-10)

		# Extend the grid by one point (reflection) at the requested edges
		if extend[0]:
			xy = np.concatenate((2 * xy[:1, :, :] - xy[1:2, :, :], xy), axis = 0)
			fxy = np.concatenate((fxy[1:2, :], fxy), axis = 0)
		if extend[1]:
			xy = np.concatenate((xy, 2 * xy[-1:, :, :] - xy[-2:-1, :, :]), axis = 0)
			fxy = np.concatenate((fxy, fxy[-2:-1, :]), axis = 0)
		if extend[2]:
			xy = np.concatenate((2 * xy[:, :1, :] - xy[:, 1:2, :], xy), axis = 1)
			fxy = np.concatenate((fxy[:, 1:2], fxy), axis = 1)
		if extend[3]:
			xy = np.concatenate((xy, 2 * xy[:, -1:, :] - xy[:, -2:-1, :]), axis = 1)
			fxy = np.concatenate((fxy, fxy[:, -2:-1]), axis = 1)

	jx, jy = np.meshgrid(np.arange(0, xy.shape[0]), np.arange(0, xy.shape[1]), indexing='ij')

	# Minima: the central value must be smaller (by more than accuracy) than
	# all eight neighbours
	ex_min = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		xp = fxy[2:, 1:-1] > fxy[1:-1, 1:-1] + accuracy
		xm = fxy[:-2, 1:-1] > fxy[1:-1, 1:-1] + accuracy
		yp = fxy[1:-1, 2:] > fxy[1:-1, 1:-1] + accuracy
		ym = fxy[1:-1, :-2] > fxy[1:-1, 1:-1] + accuracy
		pp = fxy[2:, 2:] > fxy[1:-1, 1:-1] + accuracy
		mp = fxy[:-2, 2:] > fxy[1:-1, 1:-1] + accuracy
		pm = fxy[2:, :-2] > fxy[1:-1, 1:-1] + accuracy
		mm = fxy[:-2, :-2] > fxy[1:-1, 1:-1] + accuracy
	mincond = xp & xm & yp & ym & pp & mm & pm & mp
	# Apply nine-point interpolator
	if nine_point:
		jxmin = jx[1:-1, 1:-1][mincond]
		jymin = jy[1:-1, 1:-1][mincond]
		for i, j in zip(jxmin, jymin):
			ff, xx, hess = nine_point_extremum_solver(xy[i-1:i+2, j-1:j+2], fxy[i-1:i+2, j-1:j+2])
			cc = invmasses_2d_from_abc(hess, polar=polar, degrees=degrees, xy=xx)
			ex_min.append(BandExtremum("min", tuple(xx), ff, cc))
	else:
		xymin = xy[1:-1, 1:-1][mincond]
		fmin = fxy[1:-1, 1:-1][mincond]
		ex_min = [BandExtremum("min", tuple(xx), ff, (0.0, 0.0)) for ff, xx in zip(fmin, xymin)]

	# Maxima: the central value must be larger (by more than accuracy) than
	# all eight neighbours
	ex_max = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		xp = fxy[2:, 1:-1] < fxy[1:-1, 1:-1] - accuracy
		xm = fxy[:-2, 1:-1] < fxy[1:-1, 1:-1] - accuracy
		yp = fxy[1:-1, 2:] < fxy[1:-1, 1:-1] - accuracy
		ym = fxy[1:-1, :-2] < fxy[1:-1, 1:-1] - accuracy
		pp = fxy[2:, 2:] < fxy[1:-1, 1:-1] - accuracy
		mp = fxy[:-2, 2:] < fxy[1:-1, 1:-1] - accuracy
		pm = fxy[2:, :-2] < fxy[1:-1, 1:-1] - accuracy
		mm = fxy[:-2, :-2] < fxy[1:-1, 1:-1] - accuracy
	maxcond = xp & xm & yp & ym & pp & mm & pm & mp
	# Apply nine-point interpolator
	if nine_point:
		jxmax = jx[1:-1, 1:-1][maxcond]
		jymax = jy[1:-1, 1:-1][maxcond]
		for i, j in zip(jxmax, jymax):
			ff, xx, hess = nine_point_extremum_solver(xy[i-1:i+2, j-1:j+2], fxy[i-1:i+2, j-1:j+2])
			cc = invmasses_2d_from_abc(hess, polar=polar, degrees=degrees, xy=xx)
			ex_max.append(BandExtremum("max", tuple(xx), ff, cc))
	else:
		xymax = xy[1:-1, 1:-1][maxcond]
		fmax = fxy[1:-1, 1:-1][maxcond]
		ex_max = [BandExtremum("max", tuple(xx), ff, (0.0, 0.0)) for ff, xx in zip(fmax, xymax)]

	return ex_min + ex_max
+
def local_extrema_polar_zero(xy, fxy, accuracy = 0.0, nine_point = True, degrees = True):
	"""Find local extrema in 2D polar coordinates at zero.

	Arguments:
	xy           Array. Values for (x, y).
	fxy          Array. Values for f(x, y).
	accuracy     Float. If the values of the function at neighbouring points are
	             no more than this value apart, do not consider an extremum at
	             this point.
	nine_point   True or False. Whether to use the nine-point extremum solver.
	degrees      True or False. Whether angular coordinates are in degrees.

	Returns:
	List of BandExtremum instances.
	"""
	aunit = 1 if degrees else 180 / np.pi  # multiplier that converts phi to degrees
	xy = np.asarray(xy)
	fxy = np.asarray(fxy)

	# Angles
	phi = xy[0, :, 1]
	phimin = phi.min()
	phimax = phi.max()
	if len(phi) > 1 and phimin == phimax:
		raise ValueError("Singular input values (phi)")

	# Radii; j0 becomes the index of the (last) grid point with r = 0
	r = xy[:, 0, 0]
	j0 = None
	for jr, rr in enumerate(r):
		if abs(rr) < 1e-7:
			j0 = jr
	if j0 is None:
		return []
	if j0 == len(r) - 1:
		return []  # no data at nonzero radius beyond r = 0
	dr = r[j0+1]

	# Indices for angles 0, 45, 90, 135 degrees modulo 180
	i0 = (np.abs(np.mod(phi * aunit + 90, 180) - 90) < 1e-6)
	i45 = (np.abs(np.mod(phi * aunit, 180) - 45) < 1e-6)
	i90 = (np.abs(np.mod(phi * aunit, 180) - 90) < 1e-6)
	i135 = (np.abs(np.mod(phi * aunit, 180) - 135) < 1e-6)

	if np.count_nonzero(i0) == 0 or np.count_nonzero(i90) == 0:
		return []
	f0 = np.mean(fxy[j0, :])
	df0 = np.mean(fxy[j0 + 1, i0] - fxy[j0, i0])
	df90 = np.mean(fxy[j0 + 1, i90] - fxy[j0, i90])
	if df0 * df90 <= 0:  # Saddle point, not an extremum
		return []

	# Construct a cartesian grid around zero. The value nmass counts how many
	# diagonal directions (45, 135 degrees) are backed by actual data.
	if np.count_nonzero(i45) > 0 and np.count_nonzero(i135) > 0:
		df45 = np.mean(fxy[j0 + 1, i45] - fxy[j0, i45])
		df135 = np.mean(fxy[j0 + 1, i135] - fxy[j0, i135])
		nmass = 2
	elif np.count_nonzero(i45) > 0:
		df45 = np.mean(fxy[j0 + 1, i45] - fxy[j0, i45])
		df135 = df45  # substitute missing 135-degree data
		nmass = 1
	elif np.count_nonzero(i135) > 0:
		df135 = np.mean(fxy[j0 + 1, i135] - fxy[j0, i135])
		df45 = df135  # substitute missing 45-degree data
		nmass = 1
	else:
		df45 = (df0 + df90) / 2
		df135 = (df0 + df90) / 2
		nmass = 0
	xdata = np.dstack(np.meshgrid([-dr, 0, dr], [-dr, 0, dr], indexing='ij'))
	ydata = f0 + np.array(
		[[2 * df45, df0, 2 * df135],
		[df90, 0, df90],
		[2 * df135, df0, 2 * df45]])

	# Apply 2d extrema finder on cartesian grid around zero. Forward the
	# nine_point option (previously this argument was silently ignored).
	excart = local_extrema_2d(xdata, ydata, extend = False, polar = False, accuracy = accuracy, nine_point = nine_point)

	# Invalidate estimated masses, if data at 45 and/or 135 degrees is missing
	if nmass < 2:
		for ex in excart:
			ex.mass = tuple(float('nan') if j >= nmass else m for j, m in enumerate(ex.mass))

	return excart
+
+
def local_extrema_2d_zero(xy, fxy, accuracy = 0.0, nine_point = True, extend = None, polar = False, degrees = True):
	"""Find local extrema in 2D and deal with multiple zero points.
	This function is similar to local_extrema_2d, but handles multiple
	zero points (i.e., a singularity) more correctly. This is especially useful
	for polar coordinates.

	Arguments:
	xy           Array. Values for (x, y).
	fxy          Array. Values for f(x, y).
	accuracy     Float. If the values of the function at neighbouring points are
	             no more than this value apart, do not consider an extremum at
	             this point.
	nine_point   True or False. Whether to use the nine-point extremum solver.
	extend       List of length 4. If the elements are True or False, whether to
	             extend the grid at the four edges of the grid. If the elements
	             are floats, only extend if this value matches the value of x or
	             y at the corresponding edge. The values 0 and 'auto' are
	             equivalent to [0.0, 0.0, 0.0, 0.0]. The values None and False
	             mean no extension.
	polar        True or False. Whether to use polar coordinates. Must be True.
	degrees      True or False. Whether angular coordinates are in degrees.

	Returns:
	List of BandExtremum instances.

	Raises:
	ValueError  If polar is not True or if all phi values coincide.
	"""
	if not polar:
		raise ValueError("The function local_extrema_2d_zero must have argument polar explicitly set to True")
	degmult = 180 / np.pi if degrees else 1
	xy = np.asarray(xy)
	fxy = np.asarray(fxy)

	# Normalize the extend argument. Note: extend is False must be tested
	# explicitly before the comparison to 0, because False == 0 evaluates to
	# True in Python, whereas False should mean "no extension".
	if extend is True:
		extend = [False, False, True, True]
	elif extend is False or extend is None:
		extend = [False, False, False, False]
	elif extend == 0 or extend == 'auto':
		extend = [False, False, 0.0, 0.0]

	# Angles
	phi = xy[0, :, 1]
	phimin = phi.min()
	phimax = phi.max()
	if len(phi) > 1 and phimin == phimax:
		raise ValueError("Singular input values (phi)")

	# Radii; j0 becomes the index of the (last) grid point with r = 0
	r = xy[:, 0, 0]
	j0 = None
	for jr, rr in enumerate(r):
		if abs(rr) < 1e-7:
			j0 = jr
	if j0 is None:
		return []
	if j0 == len(r) - 1:
		return []

	# Function values at r = 0 and smallest nonzero radius r1
	f0 = np.mean(fxy[j0, :])
	f1 = np.mean(fxy[j0+1, :])
	r1 = r[j0+1]

	# The masses are given by the minimum / maximum values on the circle (arc)
	# (r1, phi) with fixed radius r1. We seek two points on perpendicular positions,
	# i.e., with angles +/-(pi/2) (+/-90 degrees) apart.
	ex_r1 = local_extrema_1d(phi, fxy[j0+1, :] - f0, extend = [extend[2], extend[3]])
	f1_m1 = None
	f1_phi1 = None
	f1_m2 = None
	for ex in ex_r1:
		if ex.minmax == 'max':
			f1_phi1, f1_m1 = ex.k[0], ex.energy
	if f1_phi1 is not None:
		half_pi = 0.5 * np.pi * degmult  # 90 degrees in the angular unit
		for ex in ex_r1:
			# Perpendicular direction phi1 + 90 degrees. The abs() is
			# essential: without it, any extremum at an angle below
			# f1_phi1 + 90 degrees would match.
			if abs(ex.k[0] - half_pi - f1_phi1) < 1e-4:
				f1_m2 = ex.energy
	if f1_m2 is None:
		sys.stderr.write("Warning (local_extrema_2d_zero): Angle range too narrow to determine two perpendicular mass directions.\n")

	minmax = "min" if f1 > f0 else "max"
	m1 = 0.0 if f1_m1 is None else (f1_m1 / r1**2)
	m2 = 0.0 if f1_m2 is None else (f1_m2 / r1**2)

	# The extremum lies at the grid point with r = 0, i.e., xy[j0, 0].
	# (The previous xy[0, j0] mixed up the radius and angle indices; the two
	# coincide only if j0 == 0.)
	return [BandExtremum(minmax, tuple(xy[j0, 0]), f0, (m1, m2))]
+
def local_extrema_3d(xyz, fxyz, accuracy = 0.0, nineteen_point = True, extend = None, cylindrical = False, spherical = False, degrees = True):
	"""Find local extrema in 3D.
	Use a crude algorithm by comparing the function at neighbouring values of
	(x, y, z). Use the multi-point extremum solver to get a result to higher
	accuracy. The input array may be extended as to find extrema at the edge.
	This is useful, for example, at k = 0.

	Arguments:
	xyz             Array. Values for (x, y, z).
	fxyz            Array. Values for f(x, y, z).
	accuracy        Float. If the values of the function at neighbouring points
	                are no more than this value apart, do not consider an
	                extremum at this point.
	nineteen_point  True or False. Whether to use the nineteen-point extremum
	                solver.
	extend          List of length 6. If the elements are True or False, whether
	                to extend the grid at the six edges of the grid. If the
	                elements are floats, only extend if this value matches the
	                value of x, y, or z at the corresponding edge. The values 0
	                and 'auto' are equivalent to [0.0, 0.0, 0.0, 0.0, 0.0, 0.0].
	                The values None and False mean no extension.
	cylindrical     True or False. Whether to use cylindrical coordinates.
	spherical       True or False. Whether to use spherical coordinates.
	degrees         True or False. Whether angular coordinates are in degrees.

	Returns:
	List of BandExtremum instances.

	Raises:
	ValueError  If cylindrical and spherical are both True.
	"""
	if cylindrical and spherical:
		raise ValueError("Arguments cylindrical and spherical cannot both be True")

	xyz = np.asarray(xyz)
	fxyz = np.asarray(fxyz)

	# TODO: Test that xy is a grid
	x = xyz[:, 0, 0, 0]
	y = xyz[0, :, 0, 1]
	z = xyz[0, 0, :, 2]

	# Apply grid extension. Note: extend is False must be tested explicitly
	# before the comparison to 0, because False == 0 evaluates to True in
	# Python, whereas False should mean "no extension".
	if extend is True:
		extend = [True, True, True, True, True, True]
	elif extend is False:
		extend = None
	elif extend == 0 or extend == 'auto':
		extend = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
	if isinstance(extend, list) and len(extend) == 6:
		# Float entries: extend only if the grid face matches that value
		if isinstance(extend[0], (float, np.floating)):
			extend[0] = (abs(x[0] - extend[0]) < 1e-10)
		if isinstance(extend[1], (float, np.floating)):
			extend[1] = (abs(x[-1] - extend[1]) < 1e-10)
		if isinstance(extend[2], (float, np.floating)):
			extend[2] = (abs(y[0] - extend[2]) < 1e-10)
		if isinstance(extend[3], (float, np.floating)):
			extend[3] = (abs(y[-1] - extend[3]) < 1e-10)
		if isinstance(extend[4], (float, np.floating)):
			extend[4] = (abs(z[0] - extend[4]) < 1e-10)
		if isinstance(extend[5], (float, np.floating)):
			extend[5] = (abs(z[-1] - extend[5]) < 1e-10)

		# Extend the grid by one point (reflection) at the requested faces
		if extend[0]:
			xyz = np.concatenate((2 * xyz[:1, :, :, :] - xyz[1:2, :, :, :], xyz), axis = 0)
			fxyz = np.concatenate((fxyz[1:2, :, :], fxyz), axis = 0)
		if extend[1]:
			xyz = np.concatenate((xyz, 2 * xyz[-1:, :, :, :] - xyz[-2:-1, :, :, :]), axis = 0)
			fxyz = np.concatenate((fxyz, fxyz[-2:-1, :]), axis = 0)
		if extend[2]:
			xyz = np.concatenate((2 * xyz[:, :1, :, :] - xyz[:, 1:2, :, :], xyz), axis = 1)
			fxyz = np.concatenate((fxyz[:, 1:2, :], fxyz), axis = 1)
		if extend[3]:
			xyz = np.concatenate((xyz, 2 * xyz[:, -1:, :, :] - xyz[:, -2:-1, :, :]), axis = 1)
			fxyz = np.concatenate((fxyz, fxyz[:, -2:-1, :]), axis = 1)
		if extend[4]:
			xyz = np.concatenate((2 * xyz[:, :, :1, :] - xyz[:, :, 1:2, :], xyz), axis = 2)
			fxyz = np.concatenate((fxyz[:, :, 1:2], fxyz), axis = 2)
		if extend[5]:
			xyz = np.concatenate((xyz, 2 * xyz[:, :, -1:, :] - xyz[:, :, -2:-1, :]), axis = 2)
			fxyz = np.concatenate((fxyz, fxyz[:, :, -2:-1]), axis = 2)

	jx, jy, jz = np.meshgrid(np.arange(0, xyz.shape[0]), np.arange(0, xyz.shape[1]), np.arange(0, xyz.shape[2]), indexing='ij')

	# Minima: the central value must be smaller (by more than accuracy) than
	# the 6 face neighbours (c1) and the 12 edge neighbours (c2); the 8 corner
	# neighbours are not considered.
	ex_min = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		xp = fxyz[2:, 1:-1, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xm = fxyz[:-2, 1:-1, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		yp = fxyz[1:-1, 2:, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		ym = fxyz[1:-1, :-2, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		zp = fxyz[1:-1, 1:-1, 2:] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		zm = fxyz[1:-1, 1:-1, :-2] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		c1 = xp & xm & yp & ym & zp & zm
		xypp = fxyz[2:, 2:, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xymp = fxyz[:-2, 2:, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xypm = fxyz[2:, :-2, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xymm = fxyz[:-2, :-2, 1:-1] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xzpp = fxyz[2:, 1:-1, 2:] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xzmp = fxyz[:-2, 1:-1, 2:] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xzpm = fxyz[2:, 1:-1, :-2] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		xzmm = fxyz[:-2, 1:-1, :-2] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		yzpp = fxyz[1:-1, 2:, 2:] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		yzmp = fxyz[1:-1, :-2, 2:] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		yzpm = fxyz[1:-1, 2:, :-2] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		yzmm = fxyz[1:-1, :-2, :-2] > fxyz[1:-1, 1:-1, 1:-1] + accuracy
		c2 = xypp & xypm & xymp & xymm & xzpp & xzpm & xzmp & xzmm & yzpp & yzpm & yzmp & yzmm
		mincond = c1 & c2
	# Apply nineteen-point interpolator
	if nineteen_point:
		jxmin = jx[1:-1, 1:-1, 1:-1][mincond]
		jymin = jy[1:-1, 1:-1, 1:-1][mincond]
		jzmin = jz[1:-1, 1:-1, 1:-1][mincond]
		for i, j, k in zip(jxmin, jymin, jzmin):
			ff, xx, hess = nineteen_point_extremum_solver(xyz[i-1:i+2, j-1:j+2, k-1:k+2], fxyz[i-1:i+2, j-1:j+2, k-1:k+2])
			cc = invmasses_3d_from_abcdef(hess, cylindrical=cylindrical, spherical=spherical, degrees=degrees, xyz=xx)
			ex_min.append(BandExtremum("min", tuple(xx), ff, cc))
	else:
		xyzmin = xyz[1:-1, 1:-1, 1:-1][mincond]
		fmin = fxyz[1:-1, 1:-1, 1:-1][mincond]
		ex_min = [BandExtremum("min", tuple(xx), ff, (0.0, 0.0, 0.0)) for ff, xx in zip(fmin, xyzmin)]

	# Maxima: the central value must be larger (by more than accuracy) than
	# the 6 face neighbours (c1) and the 12 edge neighbours (c2)
	ex_max = []
	with np.errstate(invalid = "ignore"):  # Suppress "RuntimeWarning: invalid value encountered in greater" (nan values)
		xp = fxyz[2:, 1:-1, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xm = fxyz[:-2, 1:-1, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		yp = fxyz[1:-1, 2:, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		ym = fxyz[1:-1, :-2, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		zp = fxyz[1:-1, 1:-1, 2:] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		zm = fxyz[1:-1, 1:-1, :-2] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		c1 = xp & xm & yp & ym & zp & zm
		xypp = fxyz[2:, 2:, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xymp = fxyz[:-2, 2:, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xypm = fxyz[2:, :-2, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xymm = fxyz[:-2, :-2, 1:-1] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xzpp = fxyz[2:, 1:-1, 2:] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xzmp = fxyz[:-2, 1:-1, 2:] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xzpm = fxyz[2:, 1:-1, :-2] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		xzmm = fxyz[:-2, 1:-1, :-2] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		yzpp = fxyz[1:-1, 2:, 2:] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		yzmp = fxyz[1:-1, :-2, 2:] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		yzpm = fxyz[1:-1, 2:, :-2] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		yzmm = fxyz[1:-1, :-2, :-2] < fxyz[1:-1, 1:-1, 1:-1] - accuracy
		c2 = xypp & xypm & xymp & xymm & xzpp & xzpm & xzmp & xzmm & yzpp & yzpm & yzmp & yzmm
		maxcond = c1 & c2
	# Apply nineteen-point interpolator
	if nineteen_point:
		jxmax = jx[1:-1, 1:-1, 1:-1][maxcond]
		jymax = jy[1:-1, 1:-1, 1:-1][maxcond]
		jzmax = jz[1:-1, 1:-1, 1:-1][maxcond]
		for i, j, k in zip(jxmax, jymax, jzmax):
			ff, xx, hess = nineteen_point_extremum_solver(xyz[i-1:i+2, j-1:j+2, k-1:k+2], fxyz[i-1:i+2, j-1:j+2, k-1:k+2])
			cc = invmasses_3d_from_abcdef(hess, cylindrical=cylindrical, spherical=spherical, degrees=degrees, xyz=xx)
			ex_max.append(BandExtremum("max", tuple(xx), ff, cc))
	else:
		xyzmax = xyz[1:-1, 1:-1, 1:-1][maxcond]
		fmax = fxyz[1:-1, 1:-1, 1:-1][maxcond]
		ex_max = [BandExtremum("max", tuple(xx), ff, (0.0, 0.0, 0.0)) for ff, xx in zip(fmax, xyzmax)]

	return ex_min + ex_max
+
def local_extrema_cylindrical_zero(xyz, fxyz, accuracy = 0.0, nineteen_point = True, degrees = True):
	"""Find local extrema in 3D cylindrical coordinates at zero.

	Arguments:
	xyz          Array. Values for (x, y, z).
	fxyz         Array. Values for f(x, y, z).
	accuracy     Float. If the values of the function at neighbouring points are
	             no more than this value apart, do not consider an extremum at
	             this point.
	nineteen_point   True or False. Whether to use the nineteen-point extremum solver.
	degrees      True or False. Whether angular coordinates are in degrees.

	Returns:
	List of BandExtremum instances.
	"""
	# NOTE(review): the argument nineteen_point is not used anywhere in this
	# function body; confirm whether it should be forwarded to local_extrema_3d.
	aunit = 1 if degrees else 180 / np.pi  # multiplier that converts phi to degrees
	xyz = np.asarray(xyz)
	fxyz = np.asarray(fxyz)

	# Angles
	phi = xyz[0, :, 0, 1]
	phimin = phi.min()
	phimax = phi.max()
	if len(phi) > 1 and phimin == phimax:
		raise ValueError("Singular input values (phi)")

	# Radii; j0 becomes the index of the (last) grid point with r = 0
	r = xyz[:, 0, 0, 0]
	j0 = None
	for jr, rr in enumerate(r):
		if abs(rr) < 1e-7:
			j0 = jr
	if j0 is None:
		return []
	if j0 == len(r) - 1:
		return []  # no data at nonzero radius beyond r = 0
	dr = r[j0+1]  # smallest nonzero radius

	# z coordinates
	z = xyz[0, 0, :, 2]

	# Indices for angles 0, 45, 90, 135 degrees modulo 180
	i0 = (np.abs(np.mod(phi * aunit + 90, 180) - 90) < 1e-6)
	i45 = (np.abs(np.mod(phi * aunit, 180) - 45) < 1e-6)
	i90 = (np.abs(np.mod(phi * aunit, 180) - 90) < 1e-6)
	i135 = (np.abs(np.mod(phi * aunit, 180) - 135) < 1e-6)

	if np.count_nonzero(i0) == 0 or np.count_nonzero(i90) == 0:
		return []
	f0 = np.mean(fxyz[j0, :, :], axis = 0)  # f at r = 0, as function of z
	# Note that the phi axis is axis 0 because the j0 index eliminates the r axis
	df0 = np.mean(fxyz[j0 + 1, i0, :] - fxyz[j0, i0, :], axis = 0)
	df90 = np.mean(fxyz[j0 + 1, i90, :] - fxyz[j0, i90, :], axis = 0)

	# Construct a cartesian grid around zero; keep z axis intact.
	# nmass counts how many mass directions are backed by actual data.
	if np.count_nonzero(i45) > 0 and np.count_nonzero(i135) > 0:
		df45 = np.mean(fxyz[j0 + 1, i45, :] - fxyz[j0, i45, :], axis = 0)
		df135 = np.mean(fxyz[j0 + 1, i135, :] - fxyz[j0, i135, :], axis = 0)
		nmass = 3
	elif np.count_nonzero(i45) > 0:
		df45 = np.mean(fxyz[j0 + 1, i45, :] - fxyz[j0, i45, :], axis = 0)
		df135 = df45  # substitute missing 135-degree data from 45 degrees
		nmass = 2
	elif np.count_nonzero(i135) > 0:
		df135 = np.mean(fxyz[j0 + 1, i135, :] - fxyz[j0, i135, :], axis = 0)
		df45 = df135  # substitute missing 45-degree data from 135 degrees
		nmass = 2
	else:
		df45 = (df0 + df90) / 2
		df135 = (df0 + df90) / 2
		nmass = 1
	# 3x3 in-plane grid times the original z axis; last axis holds (x, y, z)
	xdata = np.stack(np.meshgrid([-dr, 0, dr], [-dr, 0, dr], z, indexing='ij'), axis = -1)
	# 0 * f0 keeps the central entry as an array over z (broadcast shape)
	ydata = f0 + np.array(
		[[2 * df45, df0, 2 * df135],
		[df90, 0 * f0, df90],
		[2 * df135, df0, 2 * df45]])

	# Apply 3d extrema finder on cartesian grid around zero
	excart = local_extrema_3d(xdata, ydata, extend = [False, False, False, False, 0.0, 0.0], accuracy = accuracy)

	for ex in excart:
		# Convert momentum to cylindrical coordinates
		ex.k = (*to_polar(ex.k[0], ex.k[1], degrees), ex.k[2])
		# Invalidate estimated masses, if data at 45 and/or 135 degrees is missing.
		# NOTE(review): the fabricated (duplicated) in-plane masses are detected
		# by comparing adjacent sorted mass values; the index choices in the
		# replacement tuples below look asymmetric — confirm against intent.
		if nmass == 2:
			sortedmass = np.sort(ex.mass)
			if np.abs(sortedmass[1] - sortedmass[0]) < 1e-10:
				ex.mass = (sortedmass[0], float('nan'), sortedmass[2])
			elif np.abs(sortedmass[2] - sortedmass[1]) < 1e-10:
				ex.mass = (sortedmass[1], float('nan'), sortedmass[0])
		elif nmass == 1:
			sortedmass = np.sort(ex.mass)
			if np.abs(sortedmass[1] - sortedmass[0]) < 1e-10:
				ex.mass = (float('nan'), float('nan'), sortedmass[2])
			elif np.abs(sortedmass[2] - sortedmass[1]) < 1e-10:
				ex.mass = (float('nan'), float('nan'), sortedmass[0])
	return excart
+
+def local_extrema_spherical_zero(xyz, fxyz, accuracy = 0.0, nineteen_point = True, degrees = True):
+	"""Find local extrema in 3D spherical coordinates at zero and around z axis
+
+	Arguments:
+	xyz          Array. Values for (x, y, z).
+	fxyz         Array. Values for f(x, y, z).
+	accuracy     Float. If the values of the function at neighbouring points are
+	             no more than this value apart, do not consider an extremum at
+	             this point.
+	nineteen_point   True or False. Whether to use the nineteen-point extremum solver.
+	             (Currently unused in this function.)
+	degrees      True or False. Whether angular coordinates are in degrees.
+
+	Returns:
+	List of BandExtremum instances.
+	"""
+	# Multiplying a stored angle by aunit always yields degrees
+	aunit = 1 if degrees else 180 / np.pi
+	xyz = np.asarray(xyz)
+	fxyz = np.asarray(fxyz)
+
+	# Angles
+	theta = xyz[0, :, 0, 1]
+	thetamin = theta.min()
+	thetamax = theta.max()
+	if len(theta) > 1 and thetamin == thetamax:
+		raise ValueError("Singular input values (theta)")
+	phi = xyz[0, 0, :, 2]
+	phimin = phi.min()
+	phimax = phi.max()
+	if len(phi) > 1 and phimin == phimax:
+		raise ValueError("Singular input values (phi)")
+
+	# Radii
+	r = xyz[:, 0, 0, 0]
+	# Locate the index of r == 0; if several radii match, the last one wins
+	j0 = None
+	for jr, rr in enumerate(r):
+		if abs(rr) < 1e-7:
+			j0 = jr
+	if j0 is None:
+		return []
+	# If zero is the last radius, there is no neighbouring shell to compare to
+	if j0 == len(r) - 1:
+		return []
+	dr = r[j0+1]  # first radial step away from zero
+
+	# Indices for phi = 0, 45, 90, 135 degrees modulo 180
+	ip0 = (np.abs(np.mod(phi * aunit + 90, 180) - 90) < 1e-6)
+	ip45 = (np.abs(np.mod(phi * aunit, 180) - 45) < 1e-6)
+	ip90 = (np.abs(np.mod(phi * aunit, 180) - 90) < 1e-6)
+	ip135 = (np.abs(np.mod(phi * aunit, 180) - 135) < 1e-6)
+	# Indices for theta = 0, 45, 90, 135, 180
+	it0 = (np.abs(theta * aunit - 0) < 1e-6)
+	it45 = (np.abs(theta * aunit - 45) < 1e-6)
+	it90 = (np.abs(theta * aunit - 90) < 1e-6)
+	it135 = (np.abs(theta * aunit - 135) < 1e-6)
+	it180 = (np.abs(theta * aunit - 180) < 1e-6)
+
+	# The directions phi = 0, 90 in the equatorial plane (theta = 90) are the
+	# minimal data set required for the analysis
+	if np.count_nonzero(ip0) == 0 or np.count_nonzero(ip90) == 0 or np.count_nonzero(it90) == 0:
+		return []
+	f0 = np.mean(fxyz[j0, :, :])  # value at zero, averaged over all angles
+	# Note that the phi axis is axis 0 because the j0 index eliminates the r axis
+	df_90_0 = np.mean(fxyz[j0 + 1, it90, ip0] - fxyz[j0, it90, ip0])  # x
+	df_90_90 = np.mean(fxyz[j0 + 1, it90, ip90] - fxyz[j0, it90, ip90])  # y
+
+	# Construct a cartesian grid around zero; first in plane
+	if np.count_nonzero(ip45) > 0 and np.count_nonzero(ip135) > 0:
+		df_90_45 = np.mean(fxyz[j0 + 1, it90, ip45] - fxyz[j0, it90, ip45])  # +x +y
+		df_90_135 = np.mean(fxyz[j0 + 1, it90, ip135] - fxyz[j0, it90, ip135])  # +x -y
+	elif np.count_nonzero(ip45) > 0:
+		# Only one diagonal available: reuse it for the other diagonal
+		df_90_45 = np.mean(fxyz[j0 + 1, it90, ip45] - fxyz[j0, it90, ip45])
+		df_90_135 = df_90_45
+	elif np.count_nonzero(ip135) > 0:
+		df_90_135 = np.mean(fxyz[j0 + 1, it90, ip135] - fxyz[j0, it90, ip135])
+		df_90_45 = df_90_135
+	else:
+		# Neither diagonal is present: interpolate between the x and y values
+		df_90_45 = (df_90_90 + df_90_0) / 2
+		df_90_135 = (df_90_90 + df_90_0) / 2
+	df_0 = np.mean(fxyz[j0 + 1, it0, :] - fxyz[j0, it0, :])  # z+
+	if np.count_nonzero(it180) > 0:
+		df_180 = np.mean(fxyz[j0 + 1, it180, :] - fxyz[j0, it180, :])  # z-
+	else:
+		# Missing data at theta = 180: substitute from theta = 0
+		df_180 = df_0
+
+	dim = 3
+	strict = False
+	# To demand theta = 135 for 3d, put strict = True. If it is set to False,
+	# missing data at theta = 135 will be substituted from theta = 45. Note that
+	# this may lead to spurious minima at zero if the dispersion lacks the
+	# mirror symmetry kz to -kz. For bulk band structures, the symmetry is
+	# preserved, so setting strict = False should be safe.
+	# TODO: Make config value
+	if np.count_nonzero(it45) > 0:
+		df_45_0 = np.mean(fxyz[j0 + 1, it45, ip0] - fxyz[j0, it45, ip0])  # z+ x
+		df_45_90 = np.mean(fxyz[j0 + 1, it45, ip90] - fxyz[j0, it45, ip90])  # z+ y
+	else:
+		df_45_0, df_45_90 = None, None
+		dim = 2  # no out-of-plane data; fall back to the 2d analysis
+	if np.count_nonzero(it135) > 0:
+		df_135_0 = np.mean(fxyz[j0 + 1, it135, ip0] - fxyz[j0, it135, ip0])  # z- x
+		df_135_90 = np.mean(fxyz[j0 + 1, it135, ip90] - fxyz[j0, it135, ip90])  # z- y
+	elif strict:
+		df_135_0, df_135_90 = None, None
+		dim = 2
+	else:
+		df_135_0, df_135_90 = df_45_0, df_45_90
+
+	# Choose between 2d and 3d extrema analysis
+	if dim == 3:
+		xdata = np.stack(np.meshgrid([-dr, 0, dr], [-dr, 0, dr], [-dr, 0, dr], indexing='ij'), axis = -1)
+		# Edge points of the cube lie farther from zero than dr; the factor 2
+		# presumably extrapolates the radial differences quadratically to
+		# distance sqrt(2) * dr -- TODO confirm
+		ydata = f0 + np.array(
+			[[[0, 2 * df_135_0, 0],
+			[2 * df_135_90, df_180, 2 * df_135_90],
+			[0, 2 * df_135_0, 0]],
+			[[2 * df_90_45, df_90_0, 2 * df_90_135],
+			[df_90_90, 0, df_90_90],
+			[2 * df_90_135, df_90_0, 2 * df_90_45]],
+			[[0, 2 * df_45_0, 0],
+			[2 * df_45_90, df_0, 2 * df_45_90],
+			[0, 2 * df_45_0, 0]]])  # the corner values (set to 0) are ignored
+		# Apply 3d extrema finder on cartesian grid around zero
+		ex_zero = local_extrema_3d(xdata, ydata, extend = False, accuracy = accuracy)
+		for ex in ex_zero:
+			# Convert momentum to spherical coordinates
+			ex.k = to_spherical(*ex.k, degrees)
+	else:
+		xdata = np.stack(np.meshgrid([-dr, 0, dr], [-dr, 0, dr], indexing='ij'), axis = -1)
+		ydata = f0 + np.array(
+			[[2 * df_90_45, df_90_0, 2 * df_90_135],
+			[df_90_90, 0, df_90_90],
+			[2 * df_90_135, df_90_0, 2 * df_90_45]])
+		# Apply 2d extrema finder on cartesian grid around zero
+		ex_zero = local_extrema_2d(xdata, ydata, extend = False, accuracy = accuracy)
+		for ex in ex_zero:
+			# Convert momentum to spherical coordinates
+			ex.k = to_spherical(*ex.k, 0.0, degrees)
+			ex.mass = (*ex.mass, float('nan'))  # out-of-plane mass unknown in 2d
+
+	# TODO: Substitute nan values for masses where dispersion data is missing,
+	# similar to nmass in local_extrema_cylindrical_zero()
+	return ex_zero
+
+def local_extrema_spherical_zaxis(xyz, fxyz, accuracy = 0.0, nineteen_point = True, degrees = True):
+	"""Find local extrema in 3D spherical coordinates around the z axis (not zero)
+
+	Arguments:
+	xyz          Array. Values for (x, y, z).
+	fxyz         Array. Values for f(x, y, z).
+	accuracy     Float. If the values of the function at neighbouring points are
+	             no more than this value apart, do not consider an extremum at
+	             this point.
+	nineteen_point   True or False. Whether to use the nineteen-point extremum solver.
+	             (Currently unused in this function.)
+	degrees      True or False. Whether angular coordinates are in degrees.
+
+	Returns:
+	List of BandExtremum instances.
+	"""
+	# Multiplying a stored angle by aunit always yields degrees
+	aunit = 1 if degrees else 180 / np.pi
+	xyz = np.asarray(xyz)
+	fxyz = np.asarray(fxyz)
+
+	# Angles
+	theta = xyz[0, :, 0, 1]
+	thetamin = theta.min()
+	thetamax = theta.max()
+	if len(theta) > 1 and thetamin == thetamax:
+		raise ValueError("Singular input values (theta)")
+	phi = xyz[0, 0, :, 2]
+	phimin = phi.min()
+	phimax = phi.max()
+	if len(phi) > 1 and phimin == phimax:
+		raise ValueError("Singular input values (phi)")
+
+	# Radii
+	r = xyz[:, 0, 0, 0]
+	# Locate the index of r == 0; if several radii match, the last one wins
+	j0 = None
+	for jr, rr in enumerate(r):
+		if abs(rr) < 1e-7:
+			j0 = jr
+	if j0 is None:
+		return []
+	# If zero is the last radius, there is no neighbouring shell to compare to
+	if j0 == len(r) - 1:
+		return []
+	dr = r[j0+1]  # first radial step away from zero
+
+	# Indices for phi = 0, 45, 90, 135 degrees modulo 180
+	ip0 = (np.abs(np.mod(phi * aunit + 90, 180) - 90) < 1e-6)
+	ip45 = (np.abs(np.mod(phi * aunit, 180) - 45) < 1e-6)
+	ip90 = (np.abs(np.mod(phi * aunit, 180) - 90) < 1e-6)
+	ip135 = (np.abs(np.mod(phi * aunit, 180) - 135) < 1e-6)
+	# Indices for theta = 0, 180
+	it0 = (np.abs(theta * aunit - 0) < 1e-6)
+	it180 = (np.abs(theta * aunit - 180) < 1e-6)
+
+	if np.count_nonzero(ip0) == 0 or np.count_nonzero(ip90) == 0:
+		return []
+
+	exs_zaxis = []  # Holds lists of 1d extrema at identical values of r, theta
+	exs_zaxis_k = []  # Corresponding values of r, theta
+
+	# Do polar extrema analysis in (r, theta) for phi = 0, 45, 90, 135
+	for p, ip in [(0, ip0), (45, ip45), (90, ip90), (135, ip135)]:
+		if np.count_nonzero(ip) == 0:
+			continue
+
+		xdata = np.dstack(np.meshgrid(r, theta, indexing='ij'))
+		ydata = np.mean(fxyz[:, :, ip], axis = 2)
+		expol = local_extrema_2d(xdata, ydata, extend = [False, False, 0.0, 180.0 / aunit], accuracy = accuracy, polar = True, degrees = degrees)
+		for ex in expol:
+			# Discard if r == 0 or theta != 0, 180
+			if ex.k[0] <= 0.5 * dr:
+				continue
+			if not (abs(ex.k[1] * aunit) < 1e-6 or abs(ex.k[1] * aunit - 180) < 1e-6):
+				continue
+			# If there is a list of extrema at this momentum already in
+			# exs_zaxis, add the extremum here. Otherwise, create a new list.
+			exz_idx = None
+			for i_ex, exzk in enumerate(exs_zaxis_k):
+				if np.amax(np.abs(np.array(exzk) - np.array(ex.k))) < 1e-6:
+					exz_idx = i_ex
+					break
+			if exz_idx is None:
+				exs_zaxis.append([ex])
+				exs_zaxis_k.append(tuple(k for k in ex.k))  # force copy
+				exz_idx = -1  # index the list that was just appended
+			else:
+				exs_zaxis[exz_idx].append(ex)
+			# Add phi coordinate to present extremum
+			# NOTE(review): p is in degrees; for degrees = False, multiplying by
+			# aunit (= 180 / pi) does not convert it to radians. The phi value
+			# is later converted with another factor aunit (see phival below),
+			# which looks inconsistent for the radians case -- verify.
+			exs_zaxis[exz_idx][-1].k = (*exs_zaxis[exz_idx][-1].k, float(p) * aunit)
+
+	ex_zaxis = []
+	# Iterate over all lists in exs_zaxis
+	for exs in exs_zaxis:
+		invmasses = []
+		for ex in exs:
+			# Try to figure out which mass is along the radial direction:
+			# Extract function values along radial axis (for theta = 0, 180 and
+			# phi mod 180) and apply the 1d extremum solver.
+			r1, theta1, phi1 = ex.k
+			invmass = -hbarm0 / np.array(ex.mass)
+			it = it0 if theta1 * aunit < 90 else it180  # z+ or z- half axis
+			ip = ip0 if phi1 == 0 else (np.abs(np.mod(phi * aunit, 180) - phi1) < 1e-6)
+			f_r = np.mean(np.mean(fxyz[:, :, ip], axis=2)[:, it], axis=1)
+			extrema_r = local_extrema_1d(r, f_r, extend = [False, False], accuracy = accuracy)
+			# Find the 1D extremum matching the k value of the present 2D
+			# extremum ex. Identify which of the two mass values of the 2D
+			# extremum match the 1D mass. This is the radial mass. Put it at the
+			# first position and store the result in invmasses.
+			for ex_r in extrema_r:
+				if abs(ex_r.k - r1) < 1e-6:
+					invmass_r = -hbarm0 / ex_r.mass
+					diff = np.abs(invmass - invmass_r)
+					order = np.argsort(np.abs(diff))
+					# Reorder only if one of the 2D masses actually matches;
+					# otherwise keep the original order
+					if np.min(diff) < 1e-6:
+						invmass = invmass[order]
+
+			invmasses.append(invmass)
+
+		# Extract the arrays of radial and angular inverse masses
+		invmasses_r, invmasses_ang = np.array(invmasses).transpose()
+		# Extract a list of phi values
+		phival = [int(np.round(ex.k[2] * aunit)) for ex in exs]
+		# If all values are (almost) identical, extract the radial inverse mass
+		# of this series.
+		if np.amax(np.abs(invmasses_r - invmasses_r[0])) < 1e-6:
+			invmass_r = np.mean(invmasses_r)
+		else:
+			invmass_r = float('nan')
+		# Extract angular inverse masses based on the values at phi = 0, 45, 90,
+		# 135. If some angle are missing, the number of meaningful mass values
+		# is reduced.
+		if phival == [0, 45, 90, 135]:
+			# All four angles present: diagonalize the in-plane inverse mass
+			# tensor from its components a, b (diagonal) and c (off-diagonal)
+			a, c45, b, c135 = invmasses_ang
+			c = (c45 - c135) / 2
+			q = (a - b)**2 + 4 * c**2
+			if q >= 0.0:
+				invmass_ang = 0.5 * (a + b + np.sqrt(q)), 0.5 * (a + b - np.sqrt(q))
+			else:
+				invmass_ang = 0.5 * (a + b + 1.j * np.sqrt(-q)), 0.5 * (a + b - 1.j * np.sqrt(-q))
+		elif 0 in phival and 90 in phival:
+			# Only the axes: report the average; the splitting is unknown
+			a = invmasses_ang[phival.index(0)]
+			b = invmasses_ang[phival.index(90)]
+			invmass_ang = (a + b) / 2, float('nan')
+		else:
+			invmass_ang = float('nan'), float('nan')
+
+		invmass = (invmass_r, *invmass_ang)
+		# Construct BandExtremum object that will be returned below
+		ex_zaxis.append(BandExtremum(exs[0].minmax, exs[0].k, exs[0].energy, invmass))
+
+	return ex_zaxis
+
+
+def band_local_extrema(data, do_print = True, accuracy = 1e-8):
+	"""Get local band extrema (main function)
+
+	Arguments:
+	data          DiagData instance
+	do_print      True or False. Whether to print the results to stdout.
+	              (Currently unused in this function; see print_band_extrema().)
+	accuracy      Float. Accuracy used for the extremum detection. See
+	              local_extrema_1d(), for example.
+
+	Returns:
+	bands_extrema  A dict instance, whose keys are the band labels (band index
+	               or band & LL index) and values are lists of BandExtremum
+	               instances. On failure, return None.
+	"""
+	if len(data) <= 1:
+		sys.stderr.write("Warning (band_local_extrema): Insufficient dispersion data.\n")
+		return None
+
+	data_k0 = data.get_zero_point()
+	if data_k0 is None:
+		sys.stderr.write("Warning (band_local_extrema): Zero momentum not included in data. Minima and maxima at zero momentum may be missed.\n")
+		data_k0 = data.get_base_point()  # Take base point instead
+
+	# Get eigenvalues belonging to each continuous subband that starts at zero (first k)
+	if data_k0.bindex is None:
+		sys.stderr.write("ERROR (band_local_extrema): Band indices are needed for extremal-value calculation, but they are missing.\n")
+		return None
+
+	data_labels, mode = data.get_data_labels(by_index = True)
+	if mode != 'index' or data_labels is None:
+		sys.stderr.write("Warning (band_local_extrema): Band connectivity between momentum values could not be determined. Extremal-value calculation does not succeed.\n")
+		return None
+
+	# Get grid properties
+	if not isinstance(data.grid, VectorGrid):
+		sys.stderr.write("ERROR (band_local_extrema): A VectorGrid is required, but not present. Extremal-value calculation does not succeed.\n")
+		return None
+	val, var, constval, const = data.grid.get_var_const()
+	# Keyword arguments for re-vectorizing momenta in the postprocessing step
+	grid_kwds = {'astype': data.grid.vtype, 'deg': data.grid.degrees, 'prefix': data.grid.prefix}
+	degrees = data.grid.degrees  # shorthand
+	aunit = 1 if degrees else 180 / np.pi  # angle units
+
+	# Iterate over bands
+	bands_extrema = {}
+	for lb in data_labels:
+		bands_extrema[lb] = []
+		xdata, ydata = data.get_plot_coord(lb, mode)
+		if len(data.shape) == 1:  # 1D
+			xdata = val
+			bands_extrema[lb] = local_extrema_1d(xdata, ydata, extend = 0)
+		elif len(data.shape) == 2:
+			xdata, ydata = data.get_plot_coord(lb, 'index2d')
+			if var == ('k', 'kphi'):  # implied: data.grid.vtype in ['pol', 'cyl', 'sph']
+				xdata = np.array([[k.polar(deg = degrees) for k in kk] for kk in xdata])
+				phimin, phimax = xdata[0][0][1] * aunit, xdata[-1][-1][1] * aunit
+				# Extend in phi only if the boundary is a multiple of 45 degrees
+				extend_phi_min = (abs(np.remainder(phimin + 22.5, 45.0) - 22.5) < 1e-8)
+				extend_phi_max = (abs(np.remainder(phimax + 22.5, 45.0) - 22.5) < 1e-8)
+				bands_extrema[lb] = local_extrema_polar_zero(xdata, ydata, accuracy = accuracy, degrees = degrees)
+				# Discard the zero-point result if its energy is nan
+				if len(bands_extrema[lb]) > 0 and np.isnan(bands_extrema[lb][-1].energy):
+					bands_extrema[lb] = []
+				b_extrema = local_extrema_2d(xdata, ydata, extend = [False, False, extend_phi_min, extend_phi_max], polar = True, accuracy = accuracy, degrees = degrees)
+				for b_ex in b_extrema:
+					if not np.isnan(b_ex.energy):
+						bands_extrema[lb].append(b_ex)
+			elif data.grid.vtype == 'xy':
+				xdata = np.array([[k.xy() for k in kk] for kk in xdata])
+				b_extrema = local_extrema_2d(xdata, ydata, extend = 0)
+				bands_extrema[lb] = [b_ex for b_ex in b_extrema if not np.isnan(b_ex.energy)]
+			elif data.grid.vtype in ['xyz', 'cyl']:
+				# Reduce the 3D vectors to the two in-plane components
+				if 'k' in var and 'kz' in var and const == 'kphi':
+					xdata = np.array([[[k.value[0], k.value[2]] for k in kk] for kk in xdata])
+				elif 'kx' in var and 'ky' in var and const == 'kz':
+					xdata = np.array([[[k.x(), k.y()] for k in kk] for kk in xdata])
+				elif 'kx' in var and 'kz' in var and const == 'ky':
+					xdata = np.array([[[k.x(), k.z()] for k in kk] for kk in xdata])
+				elif 'ky' in var and 'kz' in var and const == 'kx':
+					xdata = np.array([[[k.y(), k.z()] for k in kk] for kk in xdata])
+				else:
+					sys.stderr.write("ERROR (band_local_extrema): Illegal combination of components for 2D grid. Extremal-value calculation does not succeed.\n")
+					return None
+				b_extrema = local_extrema_2d(xdata, ydata, extend = 0)
+				bands_extrema[lb] = [b_ex for b_ex in b_extrema if not np.isnan(b_ex.energy)]
+			else:
+				sys.stderr.write("ERROR (band_local_extrema): Illegal combination of components for 2D grid. Extremal-value calculation does not succeed.\n")
+				return None
+		elif len(data.shape) == 3:
+			if data.grid.vtype == 'xyz':
+				xdata, ydata = data.get_plot_coord(lb, 'index')
+				xdata = np.array([v.value for v in xdata]).reshape(data.grid.shape + (3,))
+				ydata = np.asarray(ydata).reshape(data.grid.shape)
+				b_extrema = local_extrema_3d(xdata, ydata, extend = 0)
+				bands_extrema[lb] = [b_ex for b_ex in b_extrema if not np.isnan(b_ex.energy)]
+			elif data.grid.vtype == 'cyl':
+				xdata, ydata = data.get_plot_coord(lb, 'index')
+				xdata = np.array([v.value for v in xdata]).reshape(data.grid.shape + (3,))
+				ydata = np.asarray(ydata).reshape(data.grid.shape)
+				phimin, phimax = xdata[0][0][0][1] * aunit, xdata[-1][-1][-1][1] * aunit
+				# Extend in phi only if the boundary is a multiple of 45 degrees
+				extend_phi_min = (abs(np.remainder(phimin + 22.5, 45.0) - 22.5) < 1e-8)
+				extend_phi_max = (abs(np.remainder(phimax + 22.5, 45.0) - 22.5) < 1e-8)
+				bands_extrema[lb] = local_extrema_cylindrical_zero(xdata, ydata, accuracy = accuracy, degrees = degrees)
+				b_extrema = local_extrema_3d(xdata, ydata, extend = [False, False, extend_phi_min, extend_phi_max, False, False], cylindrical = True, accuracy = accuracy, degrees = degrees)
+				bands_extrema[lb].extend([b_ex for b_ex in b_extrema if not np.isnan(b_ex.energy)])
+			elif data.grid.vtype == 'sph':
+				xdata, ydata = data.get_plot_coord(lb, 'index')
+				xdata = np.array([v.value for v in xdata]).reshape(data.grid.shape + (3,))
+				ydata = np.asarray(ydata).reshape(data.grid.shape)
+				thetamin, thetamax = xdata[0][0][0][1] * aunit, xdata[-1][-1][-1][1] * aunit
+				phimin, phimax = xdata[0][0][0][2] * aunit, xdata[-1][-1][-1][2] * aunit
+				# Extend only if the boundaries lie on the symmetry angles
+				extend_theta_min = (abs(np.remainder(thetamin + 45, 90.0) - 45) < 1e-8)
+				extend_theta_max = (abs(np.remainder(thetamax + 45, 90.0) - 45) < 1e-8)
+				extend_phi_min = (abs(np.remainder(phimin + 22.5, 45.0) - 22.5) < 1e-8)
+				extend_phi_max = (abs(np.remainder(phimax + 22.5, 45.0) - 22.5) < 1e-8)
+				bands_extrema[lb] = local_extrema_spherical_zero(xdata, ydata, accuracy = accuracy, degrees = degrees)
+				bands_extrema[lb].extend(local_extrema_spherical_zaxis(xdata, ydata, accuracy = accuracy, degrees = degrees))
+				b_extrema = local_extrema_3d(xdata, ydata, extend = [False, False, extend_theta_min, extend_theta_max, extend_phi_min, extend_phi_max], spherical = True, accuracy = accuracy, degrees = degrees)
+				bands_extrema[lb].extend([b_ex for b_ex in b_extrema if not np.isnan(b_ex.energy)])
+			else:
+				# NOTE(review): this message looks stale; the 'cyl' and 'sph'
+				# cases are handled above. This branch catches other vtypes.
+				sys.stderr.write("ERROR (band_local_extrema): Not implemented for 3D cylindrical and spherical grids. Extremal-value calculation does not succeed.\n")
+				return None
+		else:
+			raise ValueError("Invalid value for data.shape")
+
+	# Postprocessing
+	for lb in bands_extrema:
+		# Vectorize the momenta; this step cannot be skipped!
+		for b_ex in bands_extrema[lb]:
+			b_ex.vectorize_momentum(var, constval, const, **grid_kwds)
+		# Enter character, band index, ll index
+		if isinstance(lb, tuple):
+			# Tuple label: (LL index, band index)
+			bt = data_k0.get_char(lb) if data_k0 is not None and data_k0.char is not None else None
+			for b_ex in bands_extrema[lb]:
+				b_ex.llindex = lb[0]
+				b_ex.bindex = lb[1]
+				b_ex.char = bt  # implicit None is fine
+		else:
+			# Scalar label: band index only
+			bt = data_k0.get_char((lb,)) if data_k0 is not None and data_k0.char is not None else None
+			for b_ex in bands_extrema[lb]:
+				b_ex.bindex = lb
+				b_ex.char = bt  # implicit None is fine
+
+	return bands_extrema
+
+def print_band_extrema(bands_extrema):
+	"""Print band extrema result to stdout.
+
+	Arguments:
+	bands_extrema  A dict instance, whose keys are the band labels (band index
+	               or band & LL index) and values are lists of BandExtremum
+	               instances. This is the return value of band_local_extrema().
+	"""
+	if bands_extrema is None:
+		sys.stderr.write("Warning (print_band_extrema): No data.\n")
+		return
+	unicodewarn = False  # set when output had to be forced to UTF-8
+
+	# Display the results
+	# Iterate over band labels from high to low
+	for b in reversed(sorted(bands_extrema.keys())):
+		if len(bands_extrema[b]) == 0:
+			continue
+		bt = bands_extrema[b][0].char  # band character label, may be None
+		bshape = band_shape(bands_extrema[b]) if len(bands_extrema[b]) > 0 else '??'
+		if bt is None:
+			print("Band %3i       :" % b, bshape)
+		else:
+			# 'G' in the character label stands for capital Gamma
+			bandstr = "Band %3i (%-4s): %s" % (b, bt.replace('G', '\u0393'), bshape)
+			try:
+				print(bandstr)
+			except UnicodeEncodeError:
+				sys.stdout.buffer.write(bandstr.encode('utf-8') + b'\n')  # force unicode encoding
+				unicodewarn = True
+
+		for j, b_ex in enumerate(bands_extrema[b]):
+			print(" " * 16, str(b_ex))
+		print()
+	print()
+	if unicodewarn:
+		sys.stderr.write("Warning (print_bands_extrema): Some symbols could not be encoded in the output encoding (%s) and were forcibly converted to UTF-8. You may try to use 'export PYTHONIOENCODING=utf8' to get rid of this warning.\n" % sys.stdout.encoding)
+
+def print_gap_information(bands_extrema, ref_data):
+	"""Print information on the charge neutral gap to stdout.
+
+	Arguments:
+	bands_extrema  A dict instance, whose keys are the band labels (band index
+	               or band & LL index) and values are lists of BandExtremum
+	               instances. This is the return value of band_local_extrema().
+	ref_data       DiagData instance. Result of diagonalization.
+	"""
+	if bands_extrema is None:
+		sys.stderr.write("Warning (print_gap_information): No data.\n")
+		return
+	unicodewarn = False  # set when output had to be forced to UTF-8
+	# Print information about the gap
+	# The gap at neutrality lies between bands with indices -1 and 1
+	if 1 in bands_extrema and -1 in bands_extrema and len(bands_extrema[1]) > 0 and len(bands_extrema[-1]) > 0:
+		# Find minimum and maximum of bands above and below the 'gap', respectively.
+		b_p_min = bands_extrema[1][0]
+		b_m_max = bands_extrema[-1][0]
+		bt_p = b_p_min.char
+		bt_m = b_m_max.char
+
+		if bt_p is None or bt_m is None:
+			print("At neutrality, between band -1 and band 1, there is:")
+		else:
+			# 'G' in the character label stands for capital Gamma
+			gapstr = ("At neutrality, between band -1 (%s) and band 1 (%s), there is:" % (bt_m.replace('G', '\u0393'), bt_p.replace('G', '\u0393')))
+			try:
+				print(gapstr)
+			except UnicodeEncodeError:
+				sys.stdout.buffer.write(gapstr.encode('utf-8') + b'\n')  # force unicode encoding
+				unicodewarn = True
+		# Select the lowest extremum of band 1 and the highest of band -1
+		for b_ex in bands_extrema[1][1:]:
+			if b_ex.energy < b_p_min.energy:
+				b_p_min = b_ex
+		for b_ex in bands_extrema[-1][1:]:
+			if b_ex.energy > b_m_max.energy:
+				b_m_max = b_ex
+		# Global band edges from the full dispersion data, for consistency check
+		e_p_min = np.nanmin(ref_data.get_plot_coord(1, 'index')[1])
+		e_m_max = np.nanmax(ref_data.get_plot_coord(-1, 'index')[1])
+		# The detected extrema are trusted only if they bound the global edges
+		p_min_ok = b_p_min.minmax == 'min' and b_p_min.energy <= e_p_min
+		m_max_ok = b_m_max.minmax == 'max' and b_m_max.energy >= e_m_max
+		if e_m_max >= e_p_min:
+			print("No gap.")
+			print("The bands overlap between %.2f and %.2f meV (delta = %.2f meV)." % (e_p_min, e_m_max, e_m_max - e_p_min))
+		elif p_min_ok and m_max_ok:
+			if b_p_min.k == 0.0 and b_m_max.k == 0.0:
+				print("A direct gap at k = 0.")
+			elif b_p_min.k == b_m_max.k:
+				print("A direct gap at k = %s." % b_p_min.k)
+			else:
+				print("An indirect gap.")
+			print("The gap is between %.2f and %.2f meV (delta = %.2f meV)." % (e_m_max, e_p_min, e_p_min - e_m_max))
+		else:
+			print("A gap of unknown nature.")
+			print("The gap is between %.2f and %.2f meV (delta = %.2f meV)." % (e_m_max, e_p_min, e_p_min - e_m_max))
+			sys.stderr.write("Warning (print_gap_information): The locations of the band extrema could not be found properly. You may wish to increase the resolution and/or the k region.\n")
+		if len(ref_data.shape) == 1:
+			print("NOTE: You have requested data along an axis, but the band extrema might lie elsewhere. You may wish to plot a higher dimensional dispersion, e.g., using the arguments 'k ... kphi ...' or 'kx ... ky ...'.")
+		print()
+
+	if len(ref_data.shape) == 1:
+		sys.stderr.write("Warning (print_gap_information): Extrema and gap data might not give a full picture, because the extrema could be away from the calculated axis. Please consider increasing dimensionality.\n")
+	if unicodewarn:
+		sys.stderr.write("Warning (print_gap_information): Some symbols could not be encoded in the output encoding (%s) and were forcibly converted to UTF-8. You may try to use 'export PYTHONIOENCODING=utf8' to get rid of this warning.\n" % sys.stdout.encoding)
+
+
+### BAND SHAPES ###
+
+# Human-readable names for the band-shape permutations returned by
+# band_shape(raw = True); None maps any unknown permutation to 'Type ??'.
+band_shapes = {
+	(0,): 'Type I', (0, 1): 'Type II',
+	(0, 2, 1): 'Type III A', (2, 0, 1): 'Type III B',
+	(0, 2, 1, 3): 'Type IV A', (0, 2, 3, 1): 'Type IV B', (2, 0, 1, 3): 'Type IV C', (2, 0, 3, 1): 'Type IV D', (2, 3, 0, 1): 'Type IV E',
+	None: 'Type ??'
+}
+
+# The index tuples label the energy order of the extrema, where the first
+# element is the extremum at k = 0, the second at the smallest k != 0, etc.
+# Observation:
+# These are all permutations (0, 1, ... , n-1) where:
+#   pos(j) > pos(j+1) if j is even
+#   pos(j) < pos(j+1) if j is odd
+# Proof:
+# We assume that 0 labels a minimum; thus, all odd-numbered extrema are maxima
+# and all even-numbered extrema are minima. Each maximum must lie higher than
+# its neighbouring minima (and similarly for each minimum, mutatis mutandis),
+# hence its position in the permutation must be higher.
+# For n = 1, ..., 8, there are 1, 1, 2, 5, 16, 61, 272, 1385, possibilities.
+
+def band_shape(bands_extrema, raw = False, delta_e = 0.1, delta_k = 0.01):
+	"""Determine band shape based on the mutual positions of the extrema.
+
+	Arguments:
+	bands_extrema  List of BandExtremum instances belonging to a single band,
+	               i.e., one of the values of the dict returned by
+	               band_local_extrema(). Must be non-empty.
+	raw            True or False. If True, return raw band shape (a tuple of
+	               integers). If False, convert it to a human-readable string.
+	delta_e        Float. The minimum distance in energy for two extrema to be
+	               considered separate.
+	delta_k        Float. The minimum distance in momentum for two extrema to be
+	               considered separate.
+
+	Returns:
+	Tuple of integers (if raw is True) or string (if raw is False).
+	"""
+
+	# Sort by momentum value (length)
+	ex_ord = np.argsort(np.array([b_ex.k.len() for b_ex in bands_extrema]))
+	# Do not include duplicates, i.e., values at almost the same energy or momentum
+	# (An empty input list would raise an IndexError here.)
+	sorted_ex = [bands_extrema[ex_ord[0]]]
+	for j in ex_ord[1:]:
+		if abs(bands_extrema[j].energy - sorted_ex[-1].energy) > delta_e and bands_extrema[j].k.len() - sorted_ex[-1].k.len() > delta_k:
+			sorted_ex.append(bands_extrema[j])
+	# Is the extremum at the smallest momentum a minimum or a maximum?
+	min_max_0 = sorted_ex[0].minmax
+	# Determine energy order of the sorted, non-duplicate array;
+	# invert if the value at 0 is a maximum
+	if min_max_0 == "max":
+		ex_ord = tuple(np.argsort(np.array([-b_ex.energy for b_ex in sorted_ex])))
+	else:
+		ex_ord = tuple(np.argsort(np.array([b_ex.energy for b_ex in sorted_ex])))
+
+	# Translate the permutation to a label via the band_shapes dict
+	if raw:
+		return ex_ord
+	elif ex_ord in band_shapes:
+		return band_shapes[ex_ord]
+	else:
+		return band_shapes[None]
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/__init__.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cbe1cea81bd919302056440f2fccdb20a3beca6
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/__init__.py
@@ -0,0 +1,55 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+# in hamiltonian.py:
+from .hamiltonian import hz_sparse, hz_sparse_ll, hz_block_diag, hzy_sparse, hzy_sparse_magn, hz_sparse_ll_full, hsplit_full, hsplit_ll_full, hz_sparse_split, hz_sparse_pot, hz_sparse_pot_ll_full, hbulk_split
+
+# in full.py:
+from .full import hz, hbulk, hz_ll, hbulk_ll, hzy, hzy_magn
+
+# in parity.py
+from .parity import parity_x, parity_y, parity_z
+
+# in blocks.py
+from .blocks import hexchange, hstrain, hzeeman, h_pot_1d
+
+# in transformable.py
+from .transformable import h_kterms
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/blocks.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..24c9d7f3afdd0fcd5d9742f6b3a04e9bb85024f1
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/blocks.py
@@ -0,0 +1,2035 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from math import sqrt, sin, cos
+import numpy as np
+import sys
+from scipy.sparse import dia_matrix
+
+from ..physconst import hbarm0, eoverhbar, gg, muB
+from .. import spinmat as spin
+from ..physparams import Aexchange
+from ..momentum import Vector
+from .transform import lattice_reg_transform
+
+
+### HAMILTONIAN BUILDING BLOCKS
+
+def h0z(z, dz, k, params, kterms = None):
+	"""Hamiltonian block H0(kx, ky, z); purely kz part.
+
+	Builds the terms of the Hamiltonian that contain only kz, realized as
+	finite differences in the z lattice index. All in-plane (kx, ky) terms
+	live in h1z().
+
+	Arguments:
+	z        Lattice coordinate (index) in the z direction.
+	dz       Offset between the two z indices this block connects; only
+	         dz in {-1, 0, 1} yields nonzero derivative/diagonal terms.
+	k        In-plane momentum; unused here, kept for a uniform signature
+	         with the other building blocks.
+	params   Physical-parameter object (project type); this function uses
+	         params.z(), params.c_dz, params.c_dz2, params.norbitals and
+	         params.lattice_transformed_by_matrix().
+	kterms   Dict of transformed k-term matrices; required when the lattice
+	         has been transformed by a matrix, may be None otherwise.
+
+	Returns:
+	hmat     Numpy array of shape (norbitals, norbitals).
+	"""
+	# Momenta
+	one = (1 if dz == 0 else 0)  # for diagonal terms
+	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
+	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
+	kz2_p = params.c_dz2 * ( 1 if dz ==  1 else -1 if dz == 0 else 0)
+	kz2_m = params.c_dz2 * ( 1 if dz == -1 else -1 if dz == 0 else 0)
+	# the derivatives are split for proper symmetrization under hermitian conjugation
+	# Matrix elements; material parameters evaluated at z and at the two
+	# half-integer (interface midpoint) positions z +/- 1/2.
+	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
+	pp_0 = params.z(z + 0)
+	pp_m = params.z(z - 0.5)
+	s23p_kz = sqrt(2 / 3) * (pp_p['P'] * kz_p + pp_m['P'] * kz_m)
+	t0   = pp_0['Ec'] * one + hbarm0 * (kz2_p * (2 * pp_p['F'] + 1) + kz2_m * (2 * pp_m['F'] + 1))
+	w0p  = pp_0['Ev'] * one + hbarm0 * (kz2_p * (2 * pp_p['gamma2'] - pp_p['gamma1']) + kz2_m * (2 * pp_m['gamma2'] - pp_m['gamma1']))
+	w0m  = pp_0['Ev'] * one + hbarm0 * (kz2_p * (-2 * pp_p['gamma2'] - pp_p['gamma1']) + kz2_m * (-2 * pp_m['gamma2'] - pp_m['gamma1']))
+
+	if params.norbitals == 8:
+		# Eight-orbital model: additional two orbitals whose diagonal entry
+		# w0_7 is shifted down by delta_so (split-off coupling delta_so).
+		w0_7 = pp_0['Ev'] * one - hbarm0 * (kz2_p * pp_p['gamma1'] + kz2_m * pp_m['gamma1']) - pp_0['delta_so'] * one
+		s13p_kz = sqrt(1 / 3) * (pp_p['P'] * kz_p + pp_m['P'] * kz_m)
+		s2v0 = sqrt(2.) * hbarm0 * 2 * (pp_p['gamma2'] * kz2_p + pp_m['gamma2'] * kz2_m)
+		hmat = np.array([
+			[      t0,     0.0, 0.0, s23p_kz,     0.0, 0.0, -s13p_kz,    0.0 ],
+			[     0.0,      t0, 0.0,     0.0, s23p_kz, 0.0,      0.0, s13p_kz],
+			[     0.0,     0.0, w0p,     0.0,     0.0, 0.0,      0.0,    0.0 ],
+			[ s23p_kz,     0.0, 0.0,     w0m,     0.0, 0.0,     s2v0,    0.0 ],
+			[     0.0, s23p_kz, 0.0,     0.0,     w0m, 0.0,      0.0,  -s2v0 ],
+			[     0.0,     0.0, 0.0,     0.0,     0.0, w0p,      0.0,    0.0 ],
+			[-s13p_kz,     0.0, 0.0,    s2v0,     0.0, 0.0,     w0_7,    0.0 ],
+			[     0.0, s13p_kz, 0.0,     0.0,   -s2v0, 0.0,      0.0,   w0_7 ]])
+	else:
+		# Six-orbital model
+		hmat = np.array([
+			[     t0,     0.0, 0.0, s23p_kz,     0.0, 0.0 ],
+			[    0.0,      t0, 0.0,     0.0, s23p_kz, 0.0 ],
+			[    0.0,     0.0, w0p,     0.0,     0.0, 0.0 ],
+			[s23p_kz,     0.0, 0.0,     w0m,     0.0, 0.0 ],
+			[    0.0, s23p_kz, 0.0,     0.0,     w0m, 0.0 ],
+			[    0.0,     0.0, 0.0,     0.0,     0.0, w0p ]])
+
+	if params.lattice_transformed_by_matrix():
+		# Lattice transformed by a matrix: overwrite the central 4x4 block
+		# (orbitals 2..5) with the transformed kz^2 terms from kterms.
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		gamma1_kz2 = kz2_p * pp_p['gamma1'] + kz2_m * pp_m['gamma1']
+		gamma2_kz2 = kz2_p * pp_p['gamma2'] + kz2_m * pp_m['gamma2']
+		gamma3_kz2 = kz2_p * pp_p['gamma3'] + kz2_m * pp_m['gamma3']
+		mu_zz = hbarm0 * (mu_terms['zz']) * (gamma2_kz2 - gamma3_kz2)
+		g23_zz = hbarm0 * np.diag([1,-1,-1,1]) * (gamma2_kz2 + gamma3_kz2)
+		g1_zz = hbarm0 * np.diag([1,1,1,1]) * (-gamma1_kz2)
+		ev_zz = pp_0['Ev'] * one * np.diag([1,1,1,1])
+		h88_mat = (mu_zz + g23_zz + g1_zz + ev_zz)
+		hmat[2:6,2:6] = h88_mat
+
+		if params.norbitals == 8:
+			# NOTE(review): mu_terms87 and gg_terms87 are fetched but unused
+			# here; the (87) block is set from the conjugate transpose of
+			# h78_mat below.
+			mu_terms78 = kterms['mu78']
+			mu_terms87 = kterms['mu87']
+			gg_terms78 = kterms['gg78']
+			gg_terms87 = kterms['gg87']
+			mu78_zz = (gamma2_kz2 - gamma3_kz2) * mu_terms78[4] * 2 / np.sqrt(3)
+			gg78_zz = (gamma2_kz2 + gamma3_kz2) * gg_terms78[4] * 2 / np.sqrt(3)
+			h78_mat = 3 * hbarm0 * (mu78_zz + gg78_zz)
+			hmat[6:8, 2:6] = h78_mat
+			hmat[2:6, 6:8] = h78_mat.conjugate().transpose()
+
+	return hmat
+
+def h0zy(z, dz, y, dy, kx, params, kterms = None):
+	"""Hamiltonian block H0(kx, y, z); purely kz part."""
+	return h0z(z, dz, [kx, 0.0], params, kterms = kterms) if dy == 0 else np.zeros((params.norbitals, params.norbitals), dtype = complex)
+
+def h0bulk(k, params, lattice_reg = False, kterms = None):
+	"""Hamiltonian block H0(kx, ky, kz); purely kz part.
+
+	Bulk (3D momentum) counterpart of h0z: only the kz-dependent and
+	diagonal terms enter the returned matrix.
+
+	Arguments:
+	k            Momentum [kx, ky, kz]; a 2-component input is padded with
+	             kz = 0 (needed for bulk_ll calculation in symbolic LL mode).
+	params       Physical-parameter object (project type).
+	lattice_reg  If True, apply lattice regularization (sin/cos substitution
+	             of the momenta).
+	kterms       Dict of transformed k-term matrices; required when the
+	             lattice has been transformed by a matrix.
+
+	Returns:
+	hmat         Complex numpy array of shape (norbitals, norbitals).
+	"""
+	# Momenta
+	if isinstance(k, (list, tuple)) and len(k) == 2:  # Needed for bulk_ll calculation in symbolic LL mode
+		k = [k[0], k[1], 0.0]
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			kx, ky, kz = lattice_reg_transform(k, cc, params.lattice_trans)
+			kx2, ky2, kz2, kykz, kxkz, kxky = lattice_reg_transform(k, cc, params.lattice_trans, quadratic = True)
+		else:
+			kz = sin(cc * k[2]) / cc
+			kz2 = (1. - cos(cc * k[2])) * 2. / cc**2
+			kx = sin(cc * k[0]) / cc
+			kx2 = (1. - cos(cc * k[0])) * 2. / cc**2
+			ky = sin(cc * k[1]) / cc
+			ky2 = (1. - cos(cc * k[1])) * 2. / cc**2
+			kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	else:
+		kx, ky, kz = k[0], k[1], k[2]
+		kx2, ky2, kz2 = kx**2, ky**2, kz**2
+		kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	k2 = kx2 + ky2
+	kp = kx + 1.j * ky
+	km = kx - 1.j * ky
+	# NOTE(review): the in-plane quantities (k2, kp, km, kxky, kxkz, kykz)
+	# are currently unused in this purely-kz block; the in-plane terms are
+	# handled by the h1* blocks.
+
+	# Matrix elements; params.z(None) yields bulk (z-independent) parameters.
+	pp = params.z(None)
+	s23p_kz = sqrt(2 / 3) * pp['P'] * kz
+	t0   = pp['Ec'] + hbarm0 * kz2 * (2 * pp['F'] + 1)
+	w0p  = pp['Ev'] + hbarm0 * kz2 * (2 * pp['gamma2'] - pp['gamma1'])
+	w0m  = pp['Ev'] + hbarm0 * kz2 * (-2 * pp['gamma2'] - pp['gamma1'])
+
+	if params.norbitals == 8:
+		# Eight-orbital model: additional two orbitals, diagonal entry w0_7
+		# shifted down by delta_so.
+		w0_7 = pp['Ev'] - hbarm0 * kz2 * pp['gamma1'] - pp['delta_so']
+		s13p_kz = sqrt(1 / 3) * pp['P'] * kz
+		s2v0 = sqrt(2.) * hbarm0 * 2 * kz2 * pp['gamma2']
+		hmat = np.array([
+			[      t0,     0.0, 0.0, s23p_kz,     0.0, 0.0, -s13p_kz,    0.0 ],
+			[     0.0,      t0, 0.0,     0.0, s23p_kz, 0.0,      0.0, s13p_kz],
+			[     0.0,     0.0, w0p,     0.0,     0.0, 0.0,      0.0,    0.0 ],
+			[ s23p_kz,     0.0, 0.0,     w0m,     0.0, 0.0,     s2v0,    0.0 ],
+			[     0.0, s23p_kz, 0.0,     0.0,     w0m, 0.0,      0.0,  -s2v0 ],
+			[     0.0,     0.0, 0.0,     0.0,     0.0, w0p,      0.0,    0.0 ],
+			[-s13p_kz,     0.0, 0.0,    s2v0,     0.0, 0.0,     w0_7,    0.0 ],
+			[     0.0, s13p_kz, 0.0,     0.0,   -s2v0, 0.0,      0.0,   w0_7 ]], dtype = complex)
+	else:
+		# Six-orbital model
+		hmat = np.array([
+			[     t0,     0.0, 0.0, s23p_kz,     0.0, 0.0 ],
+			[    0.0,      t0, 0.0,     0.0, s23p_kz, 0.0 ],
+			[    0.0,     0.0, w0p,     0.0,     0.0, 0.0 ],
+			[s23p_kz,     0.0, 0.0,     w0m,     0.0, 0.0 ],
+			[    0.0, s23p_kz, 0.0,     0.0,     w0m, 0.0 ],
+			[    0.0,     0.0, 0.0,     0.0,     0.0, w0p ]], dtype = complex)
+
+	if params.lattice_transformed_by_matrix():
+		# Lattice transformed by a matrix: overwrite the central 4x4 block
+		# (orbitals 2..5) with the transformed kz^2 terms from kterms.
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		mu_zz = hbarm0 * kz2 * (mu_terms['zz']) * (pp['gamma2'] - pp['gamma3'])
+		g23_zz = hbarm0 * kz2 * np.diag([1,-1,-1,1]) * (pp['gamma2'] + pp['gamma3'])
+		g1_zz = hbarm0 * kz2 * np.diag([1,1,1,1]) * (-pp['gamma1'])
+		ev_zz = pp['Ev'] * np.diag([1,1,1,1])
+		hmat[2:6,2:6] = (mu_zz + g23_zz + g1_zz + ev_zz)
+
+		if params.norbitals == 8:
+			# Only the [0,1] element of h78_mat is needed; it is placed at the
+			# four (67)/(78)-block positions below.
+			mu_terms78 = kterms['mu78']
+			gg_terms78 = kterms['gg78']
+			mu78_zz = (pp['gamma2'] - pp['gamma3']) * 2 * kz2 * mu_terms78[4] / np.sqrt(3.)
+			gg78_zz = (pp['gamma2'] + pp['gamma3']) * 2 * kz2 * gg_terms78[4] / np.sqrt(3.)
+			h78_mat = 3 * hbarm0 * (mu78_zz + gg78_zz)
+			s2v0 = h78_mat[0,1]
+			hmat[6,3], hmat[3,6], hmat[7,4], hmat[4,7] = s2v0, s2v0, -s2v0, -s2v0
+	return hmat
+
+def h1z(z, dz, k, params, lattice_reg = False, axial = True, magn = None, ignore_magnxy = False, kterms = None):
+	"""Hamiltonian block H1(kx, ky, z); remainder (all except pure kz terms).
+
+	Version with optional in-plane magnetic field terms via a Peierls
+	substitution. The out-of-plane component Bz is always ignored, by design.
+
+	Arguments:
+	z, dz          z lattice coordinate and index offset (cf. h0z).
+	k              Sequence [kx, ky] of in-plane momenta.
+	params         Physical-parameter object (project type).
+	lattice_reg    If True, apply lattice regularization (sin/cos
+	               substitution of the momenta).
+	axial          If True, use the axial approximation, i.e. drop the
+	               (gamma3 - gamma2) terms in rr, rrd.
+	magn           Magnetic field: Vector, 3-tuple (bx, by, bz), or a
+	               number which is interpreted as bz.
+	ignore_magnxy  If True, skip the in-plane gauge-field terms even when
+	               bx or by is nonzero.
+	kterms         Dict of transformed k-term matrices; required when the
+	               lattice has been transformed by a matrix.
+
+	Returns:
+	hmat           Numpy array of shape (norbitals, norbitals).
+	"""
+	# Momenta
+	one = (1 if dz == 0 else 0)  # for diagonal terms
+	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
+	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
+	# the derivatives are split for proper symmetrization under hermitian conjugation
+
+	# Normalize magn to components; bz is deliberately unused below (see docstring).
+	magn = 0.0 if magn is None else magn
+	if isinstance(magn, Vector):
+		bx, by, bz = magn.xyz()
+	elif isinstance(magn, tuple) and len(magn) == 3:
+		bx, by, bz = magn
+	elif isinstance(magn, (int, float, np.integer, np.floating)):
+		bx, by, bz = 0, 0, magn
+	else:
+		raise TypeError("Invalid type for variable magn")
+
+	magnxy = not ignore_magnxy and (abs(bx) > 1e-9 or abs(by) > 1e-9)  # do we consider in-plane magnetic field
+	if magnxy:  # in-plane field
+		# Peierls substitution:
+		#   kx -> kx + eAx, ky -> ky + eAy, kz -> kz + eAz
+		# with:
+		#   eAx = (e B / hbar) * ( by * z)
+		#   eAy = (e B / hbar) * (-bx * z)
+		#   eAz = 0
+		# Note that bz is ignored, by design!
+		# In this geometry, we can simply shift the momenta kx, ky. (This is
+		# not possible if Bz != 0, see h1zy_magn.) Note however the k+ kz and
+		# k- kz terms in sp, spd, sm, smd.
+		# The lattice constant zres is included, because z is just an index
+		z0 = (params.nz - 1) * 0.5
+		eAx = eoverhbar * by * params.zres * (z - z0)
+		eAy = -eoverhbar * bx * params.zres * (z - z0)
+		eBx = eoverhbar * bx
+		eBy = eoverhbar * by
+		eBp = eoverhbar * (bx + 1.j * by)
+		eBm = eoverhbar * (bx - 1.j * by)
+		k = [k[0] + eAx, k[1] + eAy]
+
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			kx, ky = lattice_reg_transform(k, cc, params.lattice_trans)
+			kx2, ky2, kxky = lattice_reg_transform(k, cc, params.lattice_trans, quadratic = True)
+		else:
+			kx = sin(cc * k[0]) / cc
+			kx2 = (1. - cos(cc * k[0])) * 2. / cc**2
+			ky = sin(cc * k[1]) / cc
+			ky2 = (1. - cos(cc * k[1])) * 2. / cc**2
+			kxky = kx * ky
+	else:
+		kx, ky = k[0], k[1]
+		kx2, ky2 = kx**2, ky**2
+		kxky = kx * ky
+	k2 = kx2 + ky2
+	kp = kx + 1.j * ky
+	km = kx - 1.j * ky
+	kp2 = kx2 - ky2 + 2.j * kxky
+	km2 = kx2 - ky2 - 2.j * kxky
+
+	# Matrix elements
+	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
+	pp   = params.z(z + 0)
+	pp_m = params.z(z - 0.5)
+
+	ps2  = sqrt(1 / 2) * pp['P'] * one
+	ps6  = sqrt(1 / 6) * pp['P'] * one
+	hh   = 1
+	t1   = hbarm0 * (2 * pp['F'] + 1) * k2 * one
+	rr   = 0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * one  # axial term
+	rrd  = 0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * one
+	if not axial:
+		rr  += -0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * one  # non-axial term
+		rrd += -0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * one
+	w1p  = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2 * one
+	w1m  = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2 * one
+	gamma3_kz = pp_p['gamma3'] * kz_p + pp_m['gamma3'] * kz_m  # Note: Effectively, the terms have opposite signs, because kz_p and kz_m are defined that way
+	sp   = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one + 1.j * pp['dzkappa'] * one)
+	spd  = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one - 1.j * pp['dzkappa'] * one)
+	sm   = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one + 1.j * pp['dzkappa'] * one)
+	smd  = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one - 1.j * pp['dzkappa'] * one)
+	co   =  2.j * hbarm0 * km * pp['dzkappa'] * one  # TODO: sign convention unverified; confirm against derivation
+	cod  = -2.j * hbarm0 * kp * pp['dzkappa'] * one  # TODO: sign convention unverified; confirm against derivation
+
+	if magnxy:
+		# extra terms from in-plane gauge field
+		av_zp = (1 if dz ==  1 else 0)
+		av_zm = (1 if dz == -1 else 0)
+		gamma3_av = 0.5 * (pp_p['gamma3'] * av_zp + pp_m['gamma3'] * av_zm)  # note difference in prefactor and signs compared to gamma3_kz
+		sp  += -hbarm0 * sqrt(3.) * -eBp * gamma3_av
+		spd += -hbarm0 * sqrt(3.) *  eBm * gamma3_av
+		sm  += -hbarm0 * sqrt(3.) *  eBm * gamma3_av
+		smd += -hbarm0 * sqrt(3.) * -eBp * gamma3_av
+
+	if params.lattice_transformed_by_matrix():
+		# Lattice transformed by a matrix: recompute the 4x4 (88) block from
+		# kterms and read the individual matrix elements back from it.
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		gg_terms = kterms['gg88']
+		kappa_terms = kterms['kappa88']
+
+		gamma2_kz = pp_p['gamma2'] * kz_p + pp_m['gamma2'] * kz_m
+		mu_k = (pp['gamma2'] - pp['gamma3']) * one * (kx2 * mu_terms['xx'] + ky2 * mu_terms['yy'] + kxky * mu_terms['xy']) + (gamma2_kz - gamma3_kz) * (kx * mu_terms['xz'] + ky * mu_terms['yz'])
+		gg_k = (pp['gamma2'] + pp['gamma3']) * one * (kx2 * gg_terms['xx'] + ky2 * gg_terms['yy'] + kxky * gg_terms['xy']) + (gamma2_kz + gamma3_kz) * (kx * gg_terms['xz'] + ky * gg_terms['yz'])
+		g1_k = -pp['gamma1'] * np.diag([1,1,1,1]) * k2 * one
+		kappa_k = pp['dzkappa'] * (kx * (kappa_terms['xz'] - kappa_terms['zx']) + ky * (kappa_terms['yz'] - kappa_terms['zy'])) * one
+		h88_mat = hbarm0 * (mu_k + gg_k + g1_k + kappa_k)
+
+		if magnxy:
+			# extra terms from in-plane gauge field
+			gamma2_av = 0.5 * (pp_p['gamma2'] * av_zp + pp_m['gamma2'] * av_zm)  # note difference in prefactor and signs compared to gamma2_kz
+			delta_mu_k = 0.5j * (gamma2_av - gamma3_av) * (-eBy * mu_terms['xz'] + eBx * mu_terms['yz'])
+			delta_gg_k = 0.5j * (gamma2_av + gamma3_av) * (-eBy * gg_terms['xz'] + eBx * gg_terms['yz'])
+			h88_mat += hbarm0 * (delta_mu_k + delta_gg_k)
+		w1p, w1m = h88_mat[0,0], h88_mat[1,1]
+		rr, rrd = h88_mat[0,2], h88_mat[2,0]
+		sp, spd, sm, smd = h88_mat[3,2], h88_mat[2,3], -h88_mat[0,1], -h88_mat[1,0]
+		co, cod = h88_mat[1,2], h88_mat[2,1]
+
+	if params.norbitals == 8:
+		# Eight-orbital model: additional matrix elements involving the two
+		# extra orbitals (rows/columns 6, 7).
+		ps3 = sqrt(1 / 3) * pp['P'] * one
+		s2 = sqrt(2.)
+		u1 = -hbarm0 * pp['gamma1'] * k2 * one
+		s2v1 = -s2 * hbarm0 * pp['gamma2'] * k2 * one
+		s32stp   = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one - (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_+
+		s32stpd  = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one + (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one - (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_-
+		s32stmd  = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one + (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_-^dagger
+
+		if magnxy:
+			# extra terms from in-plane gauge field
+			s32stp  += -hbarm0 * (3. / s2) * -eBp * gamma3_av
+			s32stpd += -hbarm0 * (3. / s2) *  eBm * gamma3_av
+			s32stm  += -hbarm0 * (3. / s2) *  eBm * gamma3_av
+			s32stmd += -hbarm0 * (3. / s2) * -eBp * gamma3_av
+
+		if params.lattice_transformed_by_matrix():
+			# Transformed (78) and (87) blocks; selected elements override the
+			# untransformed values computed above.
+			mu_terms78 = kterms['mu78']
+			gg_terms78 = kterms['gg78']
+			kappa_terms78 = kterms['kappa78']
+			mu_terms87 = kterms['mu87']
+			gg_terms87 = kterms['gg87']
+			kappa_terms87 = kterms['kappa87']
+
+			mu78_k = (pp['gamma2'] - pp['gamma3']) * one * (2 * kxky * mu_terms78[2] + (kx2 - ky2) * mu_terms78[3] + (-kx2 - ky2) * mu_terms78[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx * mu_terms78[1] + 2 * ky * mu_terms78[0])  # NOTE: zz term discarded
+			gg78_k = (pp['gamma2'] + pp['gamma3']) * one * (2 * kxky * gg_terms78[2] + (kx2 - ky2) * gg_terms78[3] + (-kx2 - ky2) * gg_terms78[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx * gg_terms78[1] + 2 * ky * gg_terms78[0])  # NOTE: zz term discarded
+			kappa78_k = 0.5 * pp['dzkappa'] * (kx * (kappa_terms78['xz'] - kappa_terms78['zx']) + ky * (kappa_terms78['yz'] - kappa_terms78['zy'])) * one
+
+			mu87_k = (pp['gamma2'] - pp['gamma3']) * one * (2 * kxky * mu_terms87[2] + (kx2 - ky2) * mu_terms87[3] + (-kx2 - ky2) * mu_terms87[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx * mu_terms87[1] + 2 * ky * mu_terms87[0])  # NOTE: zz term discarded
+			gg87_k = (pp['gamma2'] + pp['gamma3']) * one * (2 * kxky * gg_terms87[2] + (kx2 - ky2) * gg_terms87[3] + (-kx2 - ky2) * gg_terms87[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx * gg_terms87[1] + 2 * ky * gg_terms87[0])  # NOTE: zz term discarded
+			kappa87_k = 0.5 * pp['dzkappa'] * (kx * (kappa_terms87['xz'] - kappa_terms87['zx']) + ky * (kappa_terms87['yz'] - kappa_terms87['zy'])) * one
+
+			if magnxy:
+				# extra terms from in-plane gauge field
+				mu78_k += 1j * (gamma2_av - gamma3_av) * (-eBy * mu_terms78[1] + eBx * mu_terms78[0])
+				gg78_k += 1j * (gamma2_av + gamma3_av) * (-eBy * gg_terms78[1] + eBx * gg_terms78[0])
+				mu87_k += 1j * (gamma2_av - gamma3_av) * (-eBy * mu_terms87[1] + eBx * mu_terms87[0])
+				gg87_k += 1j * (gamma2_av + gamma3_av) * (-eBy * gg_terms87[1] + eBx * gg_terms87[0])
+
+			h78_mat = 3 * hbarm0 * (mu78_k + gg78_k + kappa78_k)
+			h87_mat = 3 * hbarm0 * (mu87_k + gg87_k + kappa87_k)
+			s2v1 = h78_mat[0,1]
+			s32stpd = -h78_mat[0,2]
+			s32stmd = -h78_mat[1,1]
+			s32stp = -h87_mat[2,0]
+			s32stm = -h87_mat[1,1]
+
+		hmat = np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0,       0.0, -ps3 * km ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km, -ps3 * kp,       0.0 ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0,   sm / s2,  -s2 * rr ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr,      s2v1,   -s32stm ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd,   -s32stp,     -s2v1 ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p,  s2 * rrd,   sp / s2 ],
+			[           0.0,     -ps3 * km,       smd / s2,      s2v1, -s32stpd,       s2 * rr,        u1,        co ],
+			[     -ps3 * kp,           0.0,      -s2 * rrd,  -s32stmd,    -s2v1,      spd / s2,       cod,        u1 ]])
+	else:
+		hmat = np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0 ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0 ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p ]])
+
+	return hmat
+
+def h1z_ll(z, dz, n, params, lattice_reg = False, axial = True, magn = None):
+	"""LL Hamiltonian block H1_n(kx, ky, z) for legacy LL mode, where kx and ky are replaced by ladder operators.
+	n is the LL index."""
+	magn = 0.0 if magn is None else magn
+	bz = magn.z() if isinstance(magn, Vector) else magn[2] if isinstance(magn, tuple) and len(magn) == 3 else magn  # z component
+	if not axial:
+		sys.stderr.write("ERROR (hz1_ll): Landau level calculation in non-axial representation not (yet) implemented.\n")
+		exit(1)
+	# Momenta
+	one = (1 if dz == 0 else 0)  # for diagonal terms
+	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
+	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
+	# the derivatives are split for proper symmetrization under hermitian conjugation
+
+	eB = eoverhbar * abs(bz)  # also: 1 / lB^2
+	lBinv = np.sqrt(2 * abs(eB))  # sqrt(2)/lB = sqrt(2 eB/hbar)
+	def kp(nn): return 0.0 if nn < 0 else lBinv * np.sqrt(nn+1)  # ladder op a^dagger
+	def km(nn): return 0.0 if nn <= 0 else lBinv * np.sqrt(nn)   # ladder op a
+	def k2(nn): return 0.0 if nn < 0 else 2 * eB * (nn + 0.5)    # ladder op a^dagger a
+	def kp2(nn): return 0.0 if nn < 0 else 2 * eB * np.sqrt((nn + 1) * (nn + 2))  # ladder op a^dagger a^dagger
+	def km2(nn): return 0.0 if nn <= 1 else 2 * eB * np.sqrt(nn * (nn - 1))   # ladder op a a
+
+	# Matrix elements
+	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
+	pp   = params.z(z + 0)
+	pp_m = params.z(z - 0.5)
+
+	ps2  = sqrt(1 / 2) * pp['P'] * one
+	ps6  = sqrt(1 / 6) * pp['P'] * one
+	hh   = 1
+	t1u  = hbarm0 * (2 * pp['F'] + 1) * k2(n  ) * one
+	t1d  = hbarm0 * (2 * pp['F'] + 1) * k2(n+1) * one
+	rr0  = 0.5 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * one  # with axial approximation
+	rr1  = rr0 * km2(n+1)
+	rr2  = rr0 * km2(n+2)
+	rrd1 = rr0 * kp2(n-1)
+	rrd2 = rr0 * kp2(n  )
+
+	w1pu = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2(n-1) * one
+	w1mu = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2(n  ) * one
+	w1md = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2(n+1) * one
+	w1pd = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2(n+2) * one
+
+	gamma3_kz = pp_p['gamma3'] * kz_p + pp_m['gamma3'] * kz_m  # Note: Effectively, the terms have opposite signs, because kz_p and kz_m are defined that way
+	sp   = -hbarm0 * sqrt(3.) * kp(n+1) * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one + 1.j * pp['dzkappa'] * one)
+	spd  = -hbarm0 * sqrt(3.) * km(n+2) * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one - 1.j * pp['dzkappa'] * one)
+	sm   = -hbarm0 * sqrt(3.) * km(n  ) * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one + 1.j * pp['dzkappa'] * one)
+	smd  = -hbarm0 * sqrt(3.) * kp(n-1) * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one - 1.j * pp['dzkappa'] * one)
+	co   =  2.j * hbarm0 * km(n+1) * pp['dzkappa'] * one  # TODO: Sign WTF?
+	cod  = -2.j * hbarm0 * kp(n  ) * pp['dzkappa'] * one  # TODO: Sign WTF?
+
+	if params.norbitals == 8:
+		ps3 = sqrt(1 / 3) * pp['P'] * one
+		s2 = sqrt(2.)
+		u1u = -hbarm0 * pp['gamma1'] * k2(n  ) * one
+		u1d = -hbarm0 * pp['gamma1'] * k2(n+1) * one
+		s2v1u = -s2 * hbarm0 * pp['gamma2'] * k2(n  ) * one
+		s2v1d = -s2 * hbarm0 * pp['gamma2'] * k2(n+1) * one
+		s32stp   = -hbarm0 * (3. / s2) * kp(n  ) * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one - (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_+
+		s32stpd  = -hbarm0 * (3. / s2) * km(n+1) * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one + (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = -hbarm0 * (3. / s2) * km(n+1) * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * one - (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_-
+		s32stmd  = -hbarm0 * (3. / s2) * kp(n  ) * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * one + (1.j / 3.) * pp['dzkappa'] * one)  # sqrt(3/2) * Stilde_-^dagger
+
+		return np.array([
+			[          t1u,            0.0, -hh*ps2*kp(n-1),        0.0, ps6*km(n+1),           0.0,           0.0, -ps3 * km(n+1) ],
+			[          0.0,            t1d,             0.0, -ps6*kp(n),         0.0, hh*ps2*km(n+2), -ps3 * kp(n),            0.0 ],
+			[-hh*ps2*km(n),            0.0,            w1pu,        -sm,         rr1,           0.0,       sm / s2,      -s2 * rr1 ],
+			[          0.0,   -ps6*km(n+1),            -smd,       w1mu,          co,           rr2,         s2v1u,        -s32stm ],
+			[    ps6*kp(n),            0.0,            rrd1,        cod,        w1md,           spd,       -s32stp,         -s2v1d ],
+			[          0.0, hh*ps2*kp(n+1),             0.0,       rrd2,          sp,          w1pd,     s2 * rrd2,        sp / s2 ],
+			[          0.0, -ps3 * km(n+1),        smd / s2,      s2v1u,    -s32stpd,      s2 * rr2,           u1u,             co ],
+			[ -ps3 * kp(n),            0.0,      -s2 * rrd1,   -s32stmd,      -s2v1d,      spd / s2,           cod,            u1d ]])
+	else:
+		return np.array([
+			[          t1u,            0.0, -hh*ps2*kp(n-1),        0.0, ps6*km(n+1),           0.0 ],
+			[          0.0,            t1d,             0.0, -ps6*kp(n),         0.0, hh*ps2*km(n+2)],
+			[-hh*ps2*km(n),            0.0,            w1pu,        -sm,         rr1,           0.0 ],
+			[          0.0,   -ps6*km(n+1),            -smd,       w1mu,          co,           rr2 ],
+			[    ps6*kp(n),            0.0,            rrd1,        cod,        w1md,           spd ],
+			[          0.0, hh*ps2*kp(n+1),             0.0,       rrd2,          sp,          w1pd ]])
+
+# H1 as function of (kx, ky, kz): bulk Landau-level version of h1z_ll
+def h1bulk_ll(k, n, params, lattice_reg = False, axial = True, magn = None):
+	"""LL Hamiltonian block H1_n(kx, ky, kz) for legacy LL mode, where kx and ky are replaced by ladder operators.
+	n is the LL index."""
+	magn = 0.0 if magn is None else magn
+	bz = magn.z() if isinstance(magn, Vector) else magn[2] if isinstance(magn, tuple) and len(magn) == 3 else magn  # z component
+	if not axial:
+		sys.stderr.write("ERROR (hz1_ll): Landau level calculation in non-axial representation not (yet) implemented.\n")
+		exit(1)
+
+	# Momenta
+	if lattice_reg:
+		cc = params.a_lattice
+		kz = sin(cc * k[2]) / cc
+		kz2 = (1. - cos(cc * k[2])) * 2. / cc**2
+	else:
+		kz = k[2]
+		kz2 = k[2]**2
+
+	eB = eoverhbar * abs(bz)  # also: 1 / lB^2
+	lBinv = np.sqrt(2 * abs(eB))  # sqrt(2)/lB = sqrt(2 eB/hbar)
+	def kp(nn): return 0.0 if nn < 0 else lBinv * np.sqrt(nn+1)  # ladder op a^dagger
+	def km(nn): return 0.0 if nn <= 0 else lBinv * np.sqrt(nn)   # ladder op a
+	def k2(nn): return 0.0 if nn < 0 else 2 * eB * (nn + 0.5)    # ladder op a^dagger a
+	def kp2(nn): return 0.0 if nn < 0 else 2 * eB * np.sqrt((nn + 1) * (nn + 2))  # ladder op a^dagger a^dagger
+	def km2(nn): return 0.0 if nn <= 1 else 2 * eB * np.sqrt(nn * (nn - 1))   # ladder op a a
+
+	# Matrix elements
+	pp = params.z(None)
+	pp['dzkappa'] = 0.0
+	pp['dzgamma3'] = 0.0
+
+	ps2  = sqrt(1 / 2) * pp['P']
+	ps6  = sqrt(1 / 6) * pp['P']
+	hh   = 1
+	t1u  = hbarm0 * (2 * pp['F'] + 1) * k2(n  )
+	t1d  = hbarm0 * (2 * pp['F'] + 1) * k2(n+1)
+	rr0  = 0.5 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2'])  # with axial approximation
+	rr1  = rr0 * km2(n+1)
+	rr2  = rr0 * km2(n+2)
+	rrd1 = rr0 * kp2(n-1)
+	rrd2 = rr0 * kp2(n  )
+
+	w1pu = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2(n-1)
+	w1mu = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2(n  )
+	w1md = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2(n+1)
+	w1pd = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2(n+2)
+
+	gamma3_kz = pp['gamma3'] * kz
+	sp   = -hbarm0 * sqrt(3.) * kp(n+1) * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	spd  = -hbarm0 * sqrt(3.) * km(n+2) * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	sm   = -hbarm0 * sqrt(3.) * km(n  ) * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	smd  = -hbarm0 * sqrt(3.) * kp(n-1) * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	co   = 0  # pp['dzkappa'] = 0
+	cod  = 0  # pp['dzkappa'] = 0
+
+	if params.norbitals == 8:
+		ps3 = sqrt(1 / 3) * pp['P']
+		s2 = sqrt(2.)
+		u1u = -hbarm0 * pp['gamma1'] * k2(n  )
+		u1d = -hbarm0 * pp['gamma1'] * k2(n+1)
+		s2v1u = -s2 * hbarm0 * pp['gamma2'] * k2(n  )
+		s2v1d = -s2 * hbarm0 * pp['gamma2'] * k2(n+1)
+		s32stp   = -hbarm0 * (3. / s2) * kp(n  ) * (2 * gamma3_kz)  # sqrt(3/2) * Stilde_+
+		s32stpd  = -hbarm0 * (3. / s2) * km(n+1) * (2 * gamma3_kz)  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = -hbarm0 * (3. / s2) * km(n+1) * (2 * gamma3_kz)  # sqrt(3/2) * Stilde_-
+		s32stmd  = -hbarm0 * (3. / s2) * kp(n  ) * (2 * gamma3_kz)  # sqrt(3/2) * Stilde_-^dagger
+
+		return np.array([
+			[          t1u,            0.0, -hh*ps2*kp(n-1),        0.0, ps6*km(n+1),           0.0,           0.0, -ps3 * km(n+1) ],
+			[          0.0,            t1d,             0.0, -ps6*kp(n),         0.0, hh*ps2*km(n+2), -ps3 * kp(n),            0.0 ],
+			[-hh*ps2*km(n),            0.0,            w1pu,        -sm,         rr1,           0.0,       sm / s2,      -s2 * rr1 ],
+			[          0.0,   -ps6*km(n+1),            -smd,       w1mu,          co,           rr2,         s2v1u,        -s32stm ],
+			[    ps6*kp(n),            0.0,            rrd1,        cod,        w1md,           spd,       -s32stp,         -s2v1d ],
+			[          0.0, hh*ps2*kp(n+1),             0.0,       rrd2,          sp,          w1pd,     s2 * rrd2,        sp / s2 ],
+			[          0.0, -ps3 * km(n+1),        smd / s2,      s2v1u,    -s32stpd,      s2 * rr2,           u1u,             co ],
+			[ -ps3 * kp(n),            0.0,      -s2 * rrd1,   -s32stmd,      -s2v1d,      spd / s2,           cod,            u1d ]])
+	else:
+		return np.array([
+			[          t1u,            0.0, -hh*ps2*kp(n-1),        0.0, ps6*km(n+1),           0.0 ],
+			[          0.0,            t1d,             0.0, -ps6*kp(n),         0.0, hh*ps2*km(n+2)],
+			[-hh*ps2*km(n),            0.0,            w1pu,        -sm,         rr1,           0.0 ],
+			[          0.0,   -ps6*km(n+1),            -smd,       w1mu,          co,           rr2 ],
+			[    ps6*kp(n),            0.0,            rrd1,        cod,        w1md,           spd ],
+			[          0.0, hh*ps2*kp(n+1),             0.0,       rrd2,          sp,          w1pd ]])
+
+def h1zy(z, dz, y, dy, k, params, boundary = 0, lattice_reg = False, axial = True, kterms = None):
+	"""Hamiltonian block H1(kx, y, z); remainder (all except pure kz terms); Version without magnetic field.
+
+	Arguments:
+	z, dz         Lattice coordinate in the z direction and offset dz = z' - z
+	              (-1, 0, or 1) selecting the (off-)diagonal in z.
+	y, dy         Lattice coordinate in the y direction and offset dy = y' - y
+	              (-1, 0, or 1) selecting the (off-)diagonal in y.
+	k             Momentum kx; if a list, only the first component is used.
+	params        Parameter object; provides material parameters as function of
+	              z (params.z), discretization constants (c_dz, c_dy, c_dy2),
+	              lattice data, and the number of orbitals.
+	boundary      1 or -1 at the upper/lower edge in y; 0 in the interior.
+	lattice_reg   If True, apply lattice regularization to kx.
+	axial         If True, use the axial approximation; if False, also include
+	              the non-axial contributions to rr, rrd.
+	kterms        Dict with k-dependent matrices (keys 'mu88', 'gg88',
+	              'kappa88', ...); required if the lattice is transformed by a
+	              matrix.
+
+	Returns:
+	Numpy array of shape (6, 6) or (8, 8), depending on params.norbitals.
+	"""
+	# Momenta
+	if isinstance(k, list):
+		kx0 = k[0]
+	else:
+		kx0 = k
+	kz   = params.c_dz  * ( 1 if dz == 1 else -1 if dz == -1 else 0)  # NOTE(review): kz appears unused below; only kz_p, kz_m enter (via gamma3_kz etc.)
+	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
+	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
+	# the derivatives are split for proper symmetrization under hermitian conjugation
+	onez = (1 if dz == 0 else 0)  # for diagonal terms
+	oney = (1 if dy == 0 else 0)  # for diagonal terms
+	ddy   =  1 if dy == 1 else -1 if dy == -1 else 0  # first
+	# Second derivative stencil; the diagonal coefficient differs at the edges.
+	if boundary == 0:     # not at an edge
+		d2dy2 = -2 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
+	elif boundary ==  1 or boundary == -1:   # at upper/lower edge
+		d2dy2 = -1 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
+	else:
+		sys.stderr.write("ERROR (h1zy): Boundary number should be -1,0,1\n")
+		exit(1)
+
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			kx = lattice_reg_transform(kx0, cc, params.lattice_trans)
+			kx2 = lattice_reg_transform(kx0, cc, params.lattice_trans, quadratic = True)
+		else:
+			kx = sin(cc * kx0) / cc
+			kx2 = (1. - cos(cc * kx0)) * 2. / cc**2
+	else:
+		kx = kx0
+		kx2 = kx**2
+
+	ky = params.c_dy * ddy
+	ky2 = params.c_dy2 * d2dy2
+	kxky = kx * ky
+	kp  = oney * kx + 1.j * ky
+	km  = oney * kx - 1.j * ky
+	k2  = oney * kx2 + ky2
+	kp2 = oney * kx2 + 2.j * kxky - ky2
+	km2 = oney * kx2 - 2.j * kxky - ky2
+	# include oney in kx (-> kx1) and kx2
+	kx1 = oney * kx
+	kx2 = oney * kx2
+
+	# Matrix elements
+	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
+	pp   = params.z(z + 0)
+	pp_m = params.z(z - 0.5)
+
+	ps2  = sqrt(1 / 2) * pp['P'] * onez
+	ps6  = sqrt(1 / 6) * pp['P'] * onez
+	hh   = 1
+	t1   = hbarm0 * (2 * pp['F'] + 1) * k2 * onez
+	rr   = 0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * onez  # axial term
+	rrd  = 0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * onez
+	if not axial:
+		# strip orientation; phase applies to non-axial term only
+		if params.lattice_transformed_by_angle():
+			phi = params.lattice_orientation[0] * np.pi / 180.
+			kp2_phase = np.exp( 4.j * phi)
+			km2_phase = np.exp(-4.j * phi)
+		else:
+			kp2_phase = 1.0
+			km2_phase = 1.0
+		rr   += -0.5 * kp2 * kp2_phase * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * onez  # non-axial term
+		rrd  += -0.5 * km2 * km2_phase * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * onez
+	w1p  = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2 * onez
+	w1m  = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2 * onez
+	gamma3_kz = pp_p['gamma3'] * kz_p + pp_m['gamma3'] * kz_m  # Note: Effectively, the terms have opposite signs, because kz_p and kz_m are defined that way
+	sp   = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez + 1.j * pp['dzkappa'] * onez)
+	spd  = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez - 1.j * pp['dzkappa'] * onez)
+	sm   = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez + 1.j * pp['dzkappa'] * onez)
+	smd  = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez - 1.j * pp['dzkappa'] * onez)
+	co   =  2.j * hbarm0 * km * pp['dzkappa'] * onez  # TODO: Sign WTF?
+	cod  = -2.j * hbarm0 * kp * pp['dzkappa'] * onez  # TODO: Sign WTF?
+
+	if params.lattice_transformed_by_matrix():
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		gg_terms = kterms['gg88']
+		kappa_terms = kterms['kappa88']
+
+		gamma2_kz = pp_p['gamma2'] * kz_p + pp_m['gamma2'] * kz_m
+		mu_k = (pp['gamma2'] - pp['gamma3']) * onez * (kx2 * mu_terms['xx'] + ky2 * mu_terms['yy'] + kxky * mu_terms['xy']) + (gamma2_kz - gamma3_kz) * (kx1 * mu_terms['xz'] + ky * mu_terms['yz'])
+		gg_k = (pp['gamma2'] + pp['gamma3']) * onez * (kx2 * gg_terms['xx'] + ky2 * gg_terms['yy'] + kxky * gg_terms['xy']) + (gamma2_kz + gamma3_kz) * (kx1 * gg_terms['xz'] + ky * gg_terms['yz'])
+		g1_k = -pp['gamma1'] * np.diag([1,1,1,1]) * k2 * onez
+		kappa_k = pp['dzkappa'] * (kx1 * (kappa_terms['xz'] - kappa_terms['zx']) + ky * (kappa_terms['yz'] - kappa_terms['zy'])) * onez
+		h88_mat = hbarm0 * (mu_k + gg_k + g1_k + kappa_k)
+
+		# Extract the matrix elements from the transformed 4x4 Gamma8 block,
+		# replacing the untransformed values computed above.
+		w1p, w1m = h88_mat[0,0], h88_mat[1,1]
+		rr, rrd = h88_mat[0,2], h88_mat[2,0]
+		sp, spd, sm, smd = h88_mat[3,2], h88_mat[2,3], -h88_mat[0,1], -h88_mat[1,0]
+		co, cod = h88_mat[1,2], h88_mat[2,1]
+
+	if params.norbitals == 8:
+		# Additional matrix elements for the Gamma7 orbitals (8-orbital model)
+		ps3 = sqrt(1 / 3) * pp['P'] * onez
+		s2 = sqrt(2.)
+		u1 = -hbarm0 * pp['gamma1'] * k2 * onez
+		s2v1 = -s2 * hbarm0 * pp['gamma2'] * k2 * onez
+		s32stp   = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez - (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_+
+		s32stpd  = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez + (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez - (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_-
+		s32stmd  = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez + (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_-^dagger
+
+		if params.lattice_transformed_by_matrix():
+			mu_terms78 = kterms['mu78']
+			gg_terms78 = kterms['gg78']
+			kappa_terms78 = kterms['kappa78']
+			mu_terms87 = kterms['mu87']
+			gg_terms87 = kterms['gg87']
+			kappa_terms87 = kterms['kappa87']
+
+			mu78_k = (pp['gamma2'] - pp['gamma3']) * onez * (2 * kxky * mu_terms78[2] + (kx2 - ky2) * mu_terms78[3] + (-kx2 - ky2) * mu_terms78[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx1 * mu_terms78[1] + 2 * ky * mu_terms78[0])  # NOTE: zz term discarded
+			gg78_k = (pp['gamma2'] + pp['gamma3']) * onez * (2 * kxky * gg_terms78[2] + (kx2 - ky2) * gg_terms78[3] + (-kx2 - ky2) * gg_terms78[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx1 * gg_terms78[1] + 2 * ky * gg_terms78[0])  # NOTE: zz term discarded
+			kappa78_k = 0.5 * pp['dzkappa'] * (kx1 * (kappa_terms78['xz'] - kappa_terms78['zx']) + ky * (kappa_terms78['yz'] - kappa_terms78['zy'])) * onez
+			h78_mat = 3 * hbarm0 * (mu78_k + gg78_k + kappa78_k)
+
+			mu87_k = (pp['gamma2'] - pp['gamma3']) * onez * (2 * kxky * mu_terms87[2] + (kx2 - ky2) * mu_terms87[3] + (-kx2 - ky2) * mu_terms87[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx1 * mu_terms87[1] + 2 * ky * mu_terms87[0])  # NOTE: zz term discarded
+			gg87_k = (pp['gamma2'] + pp['gamma3']) * onez * (2 * kxky * gg_terms87[2] + (kx2 - ky2) * gg_terms87[3] + (-kx2 - ky2) * gg_terms87[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx1 * gg_terms87[1] + 2 * ky * gg_terms87[0])  # NOTE: zz term discarded
+			kappa87_k = 0.5 * pp['dzkappa'] * (kx1 * (kappa_terms87['xz'] - kappa_terms87['zx']) + ky * (kappa_terms87['yz'] - kappa_terms87['zy'])) * onez
+			h87_mat = 3 * hbarm0 * (mu87_k + gg87_k + kappa87_k)
+			s2v1 = h78_mat[0,1]
+			s32stpd = -h78_mat[0,2]
+			s32stmd = -h78_mat[1,1]
+			s32stp = -h87_mat[2,0]
+			s32stm = -h87_mat[1,1]
+
+		return np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0,       0.0, -ps3 * km ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km, -ps3 * kp,       0.0 ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0,   sm / s2,  -s2 * rr ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr,      s2v1,   -s32stm ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd,   -s32stp,     -s2v1 ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p,  s2 * rrd,   sp / s2 ],
+			[           0.0,     -ps3 * km,       smd / s2,      s2v1, -s32stpd,       s2 * rr,        u1,        co ],
+			[     -ps3 * kp,           0.0,      -s2 * rrd,  -s32stmd,    -s2v1,      spd / s2,       cod,        u1 ]])
+	else:
+		return np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0 ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0 ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p ]])
+
+def h1zy_magn(z, dz, y, dy, k, params, boundary = 0, lattice_reg = False, gauge_zero = 0.0, axial = True, magn = None, ignore_magnxy = False, kterms = None):
+	"""Hamiltonian block H1(kx, y, z); remainder (all except pure kz terms); Version with magnetic field.
+
+	Arguments:
+	z, dz          Lattice coordinate in z and offset dz = z' - z (-1, 0, 1).
+	y, dy          Lattice coordinate in y and offset dy = y' - y (-1, 0, 1).
+	k              Momentum kx; if a list, only the first component is used.
+	params         Parameter object (see h1zy).
+	boundary       1 or -1 at the upper/lower edge in y; 0 in the interior.
+	lattice_reg    If True, apply lattice regularization to kx.
+	gauge_zero     Relative shift of the gauge origin y0 = ymid * (1 + gauge_zero).
+	axial          If True, use the axial approximation.
+	magn           Magnetic field: Vector instance, 3-tuple (bx, by, bz), a
+	               number (interpreted as bz), or None (zero field).
+	ignore_magnxy  If True, neglect the in-plane components bx, by.
+	kterms         Dict with k-dependent matrices; required if the lattice is
+	               transformed by a matrix.
+
+	Returns:
+	Numpy array of shape (6, 6) or (8, 8), depending on params.norbitals.
+	"""
+	magn = 0.0 if magn is None else magn
+	bz = magn.z() if isinstance(magn, Vector) else magn[2] if isinstance(magn, tuple) and len(magn) == 3 else magn  # z component
+	if ignore_magnxy:
+		bx, by = 0, 0
+	else:
+		bx = magn.x() if isinstance(magn, Vector) else magn[0] if isinstance(magn, tuple) and len(magn) == 3 else 0  # x component
+		by = magn.y() if isinstance(magn, Vector) else magn[1] if isinstance(magn, tuple) and len(magn) == 3 else 0  # y component
+
+	if isinstance(k, list):
+		kx0 = k[0]
+	else:
+		kx0 = k
+
+	# Peierls substitution:
+	#   kx -> kx + eAx, ky -> ky + eAy, kz -> kz + eAz
+	# with:
+	#   eAx = (e B / hbar) * (-bz * y + by * z)
+	#   eAy = (e B / hbar) * (-bx * z)
+	#   eAz = 0
+	# The lattice constant yres is included, because y is just an index
+	y0 = params.ymid * (1.0 + gauge_zero)
+	eBz = eoverhbar * bz
+	if ignore_magnxy:
+		eAx = -eoverhbar * bz * params.yres * (y - y0)
+		eAy = 0
+	else:
+		z0 = (params.nz - 1) * 0.5
+		eAx = -eoverhbar * bz * params.yres * (y - y0) + eoverhbar * by * params.zres * (z - z0)
+		eAy = -eoverhbar * bx * params.zres * (z - z0)
+		eBp = eoverhbar * (bx + 1.j * by)
+		eBm = eoverhbar * (bx - 1.j * by)
+		eBx = eoverhbar * bx
+		eBy = eoverhbar * by
+	# if y % 47 == 0:
+	#  	print (z, z0, y, y0, "(%s, %s, 0)" % (eAx, 0 if ignore_magnxy else eAy))
+	"""
+	if lattice_reg:  # DEBUG (old code for comparison)
+		cc = params.aLattice
+		kx = sin(cc * (kx0 + eAx)) / cc
+		kx2 = (1. - cos(cc * (kx0 + eAx))) * 2. / cc**2
+		kx02 = (1. - cos(cc * kx0)) * 2. / cc**2
+		dkx = cos(cc * (kx0 + eAx))
+	else:
+		kx = kx0 + eAx
+		kx2 = kx**2
+		kx02 = kx0**2
+		dkx = 1.
+	"""
+
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			# NOTE(review): kx0 is overwritten here before it is used again in
+			# the following three lines; verify against h1zy, which keeps kx0
+			# unchanged, whether the double transformation is intended.
+			kx0 = lattice_reg_transform(kx0, cc, params.lattice_trans)
+			kx02 = lattice_reg_transform(kx0, cc, params.lattice_trans, quadratic = True)
+			kx = lattice_reg_transform(kx0 + eAx, cc, params.lattice_trans)
+			kx2 = lattice_reg_transform(kx0 + eAx, cc, params.lattice_trans, quadratic = True)
+			# TODO:
+			dkx = cos(cc * (kx0 + eAx))
+		else:
+			kx = sin(cc * (kx0 + eAx)) / cc
+			kx2 = (1. - cos(cc * (kx0 + eAx))) * 2. / cc**2
+			kx02 = (1. - cos(cc * kx0)) * 2. / cc**2
+			dkx = cos(cc * (kx0 + eAx))
+	else:
+		kx = kx0 + eAx
+		kx2 = kx**2
+		kx02 = kx0**2
+		dkx = 1.
+
+	# Momenta
+	onez = (1 if dz == 0 else 0)  # for diagonal terms
+	oney = (1 if dy == 0 else 0)  # for diagonal terms
+	ddy   =  1 if dy == 1 else -1 if dy == -1 else 0  # first
+	av_y = 0.5 if dy == 1 or dy == -1 else 0  # for use in kp2, km2
+	if boundary == 0:     # not at an edge
+		d2dy2 = -2 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
+	elif boundary ==  1 or boundary == -1:   # at upper/lower edge
+		d2dy2 = -1 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
+	else:
+		sys.stderr.write("ERROR (h1zy_magn): Boundary number should be -1,0,1\n")
+		exit(1)
+	# print ("(%2i, %2i): %2i" % (y, y+dy, d2dy2))
+
+	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
+	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
+	# the derivatives are split for proper symmetrization under hermitian conjugation
+
+	# ky, ky^2, k_+, k_-, k^2 = kx^2 + ky^2
+	if ignore_magnxy:
+		ky = params.c_dy * ddy
+		ky2 = params.c_dy2 * d2dy2
+	else:
+		ky = params.c_dy * ddy + oney * eAy
+		ky2 = params.c_dy2 * d2dy2 + 2 * params.c_dy * ddy * eAy + oney * eAy**2
+	kp  = oney * kx + 1.j * ky
+	km  = oney * kx - 1.j * ky
+	k2  = oney * kx2 + ky2
+	kxky = kx * ky  # + 0.5j * eBz * av_y * dkx
+	# NOTE(review): kp2/km2 assigned here are overwritten by "method 2" below;
+	# only kxky from the line above survives (used in the kterms branch).
+	kp2 = oney * kx2 + 2.j * kxky - ky2
+	km2 = oney * kx2 - 2.j * kxky - ky2
+	# include oney in kx (-> kx1) and kx2
+	kx1 = oney * kx
+	kx2 = oney * kx2
+
+	# k_+^2, k_-^2, method 1 (partial lattice regularization)
+	"""
+	kp2_0 = oney * kx02 + 2.j * kx0 * params.c_dy * ddy - params.c_dy2 * d2dy2
+	km2_0 = oney * kx02 - 2.j * kx0 * params.c_dy * ddy - params.c_dy2 * d2dy2
+	kp2 = kp2_0 + 2.j * oney * eAx * kx0 + 2.j * eAx * params.c_dy * ddy + oney*eAx**2 - eBz * av_y
+	km2 = km2_0 - 2.j * oney * eAx * kx0 - 2.j * eAx * params.c_dy * ddy + oney*eAx**2 + eBz * av_y
+	"""
+
+	# k_+^2, k_-^2, method 2 (full lattice regularization
+	# Note: The latter term involves dkx, which accounts for
+	# the lattice regularization if this is set to true
+	kp2 = oney * kx2 + 2.j * kx * ky - ky2 - eBz * av_y * dkx
+	km2 = oney * kx2 - 2.j * kx * ky - ky2 + eBz * av_y * dkx
+
+	# Matrix elements
+	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
+	pp   = params.z(z + 0)
+	pp_m = params.z(z - 0.5)
+
+	ps2  = sqrt(1 / 2) * pp['P'] * onez
+	ps6  = sqrt(1 / 6) * pp['P'] * onez
+	hh   = 1
+	t1   = hbarm0 * (2 * pp['F'] + 1) * k2 * onez
+	rr   = 0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * onez
+	rrd  = 0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2']) * onez
+	if not axial:
+		# strip orientation; phase applies to non-axial term only
+		if params.lattice_transformed_by_angle():
+			phi = params.lattice_orientation[0] * np.pi / 180.
+			kp2_phase = np.exp( 4.j * phi)
+			km2_phase = np.exp(-4.j * phi)
+		else:
+			kp2_phase = 1.0
+			km2_phase = 1.0
+		rr   += -0.5 * kp2 * kp2_phase * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * onez
+		rrd  += -0.5 * km2 * km2_phase * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2']) * onez
+	w1p  = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2 * onez
+	w1m  = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2 * onez
+	gamma3_kz = pp_p['gamma3'] * kz_p + pp_m['gamma3'] * kz_m  # Note: Effectively, the terms have opposite signs, because kz_p and kz_m are defined that way
+	sp   = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez + 1.j * pp['dzkappa'] * onez)
+	spd  = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez - 1.j * pp['dzkappa'] * onez)
+	sm   = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez + 1.j * pp['dzkappa'] * onez)
+	smd  = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez - 1.j * pp['dzkappa'] * onez)
+	co   =  2.j * hbarm0 * km * pp['dzkappa'] * onez  # TODO: Sign WTF?
+	cod  = -2.j * hbarm0 * kp * pp['dzkappa'] * onez  # TODO: Sign WTF?
+
+	if not ignore_magnxy:
+		# extra terms from in-plane gauge field (TODO: check whether this is correct for nontrivial strip orientation)
+		av_zp = (1 if dz ==  1 else 0)
+		av_zm = (1 if dz == -1 else 0)
+		gamma3_av = 0.5 * (pp_p['gamma3'] * av_zp + pp_m['gamma3'] * av_zm)  # note difference in prefactor and signs compared to gamma3_kz
+		sp  += -hbarm0 * sqrt(3.) * -eBp * gamma3_av * oney  # * np.exp(-2.j * phi) ?
+		spd += -hbarm0 * sqrt(3.) *  eBm * gamma3_av * oney  # * np.exp( 2.j * phi) ?
+		sm  += -hbarm0 * sqrt(3.) *  eBm * gamma3_av * oney  # * np.exp( 2.j * phi) ?
+		smd += -hbarm0 * sqrt(3.) * -eBp * gamma3_av * oney  # * np.exp(-2.j * phi) ?
+
+	if params.lattice_transformed_by_matrix() and not params.lattice_transformed_by_angle():
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		gg_terms = kterms['gg88']
+		kappa_terms = kterms['kappa88']
+
+		gamma2_kz = pp_p['gamma2'] * kz_p + pp_m['gamma2'] * kz_m
+		mu_k = (pp['gamma2'] - pp['gamma3']) * onez * (kx2 * mu_terms['xx'] + ky2 * mu_terms['yy'] + kxky * mu_terms['xy']) + (gamma2_kz - gamma3_kz) * (kx1 * mu_terms['xz'] + ky * mu_terms['yz'])
+		gg_k = (pp['gamma2'] + pp['gamma3']) * onez * (kx2 * gg_terms['xx'] + ky2 * gg_terms['yy'] + kxky * gg_terms['xy']) + (gamma2_kz + gamma3_kz) * (kx1 * gg_terms['xz'] + ky * gg_terms['yz'])
+		g1_k = -pp['gamma1'] * np.diag([1,1,1,1]) * k2 * onez
+		kappa_k = pp['dzkappa'] * (kx1 * (kappa_terms['xz'] - kappa_terms['zx']) + ky * (kappa_terms['yz'] - kappa_terms['zy'])) * onez
+		h88_mat = hbarm0 * (mu_k + gg_k + g1_k + kappa_k)
+
+		# extra terms from in-plane gauge field
+		delta_mu_k = 0.5j * onez * (pp['gamma2'] - pp['gamma3']) * eBz * av_y * dkx * mu_terms['xy']
+		delta_gg_k = 0.5j * onez * (pp['gamma2'] + pp['gamma3']) * eBz * av_y * dkx * gg_terms['xy']
+		if not ignore_magnxy:
+			# extra terms from in-plane gauge field
+			gamma2_av = 0.5 * (pp_p['gamma2'] * av_zp + pp_m['gamma2'] * av_zm)  # note difference in prefactor and signs compared to gamma2_kz
+			delta_mu_k += 0.5j * oney * (gamma2_av - gamma3_av) * (-eBy * mu_terms['xz'] + eBx * mu_terms['yz'])
+			delta_gg_k += 0.5j * oney * (gamma2_av + gamma3_av) * (-eBy * gg_terms['xz'] + eBx * gg_terms['yz'])
+		h88_mat += hbarm0 * (delta_mu_k + delta_gg_k)
+		# Extract the matrix elements from the transformed 4x4 Gamma8 block,
+		# replacing the untransformed values computed above.
+		w1p, w1m = h88_mat[0,0], h88_mat[1,1]
+		rr, rrd = h88_mat[0,2], h88_mat[2,0]
+		sp, spd, sm, smd = h88_mat[3,2], h88_mat[2,3], -h88_mat[0,1], -h88_mat[1,0]
+		co, cod = h88_mat[1,2], h88_mat[2,1]
+
+
+	if params.norbitals == 8:
+		# Additional matrix elements for the Gamma7 orbitals (8-orbital model)
+		ps3 = sqrt(1 / 3) * pp['P'] * onez
+		s2 = sqrt(2.)
+		u1 = -hbarm0 * pp['gamma1'] * k2 * onez
+		s2v1 = -s2 * hbarm0 * pp['gamma2'] * k2 * onez
+		s32stp   = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez - (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_+
+		s32stpd  = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez + (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = -hbarm0 * (3. / s2) * km * (2 * gamma3_kz - 0.j * pp['dzgamma3'] * onez - (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_-
+		s32stmd  = -hbarm0 * (3. / s2) * kp * (2 * gamma3_kz + 0.j * pp['dzgamma3'] * onez + (1.j / 3.) * pp['dzkappa'] * onez)  # sqrt(3/2) * Stilde_-^dagger
+
+		if not ignore_magnxy:
+			# extra terms from in-plane gauge field (TODO: check whether this is correct for nontrivial strip orientation)
+			s32stp  += -hbarm0 * (3. / s2) * -eBp * gamma3_av * oney
+			s32stpd += -hbarm0 * (3. / s2) *  eBm * gamma3_av * oney
+			s32stm  += -hbarm0 * (3. / s2) *  eBm * gamma3_av * oney
+			s32stmd += -hbarm0 * (3. / s2) * -eBp * gamma3_av * oney
+
+		if params.lattice_transformed_by_matrix() and not params.lattice_transformed_by_angle():
+			mu_terms78 = kterms['mu78']
+			gg_terms78 = kterms['gg78']
+			kappa_terms78 = kterms['kappa78']
+			mu_terms87 = kterms['mu87']
+			gg_terms87 = kterms['gg87']
+			kappa_terms87 = kterms['kappa87']
+
+			mu78_k = (pp['gamma2'] - pp['gamma3']) * onez * (2 * kxky * mu_terms78[2] + (kx2 - ky2) * mu_terms78[3] + (-kx2 - ky2) * mu_terms78[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx1 * mu_terms78[1] + 2 * ky * mu_terms78[0])  # NOTE: zz term discarded
+			gg78_k = (pp['gamma2'] + pp['gamma3']) * onez * (2 * kxky * gg_terms78[2] + (kx2 - ky2) * gg_terms78[3] + (-kx2 - ky2) * gg_terms78[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx1 * gg_terms78[1] + 2 * ky * gg_terms78[0])  # NOTE: zz term discarded
+			kappa78_k = 0.5 * pp['dzkappa'] * (kx1 * (kappa_terms78['xz'] - kappa_terms78['zx']) + ky * (kappa_terms78['yz'] - kappa_terms78['zy'])) * onez
+
+			mu87_k = (pp['gamma2'] - pp['gamma3']) * onez * (2 * kxky * mu_terms87[2] + (kx2 - ky2) * mu_terms87[3] + (-kx2 - ky2) * mu_terms87[4] / np.sqrt(3.)) + (gamma2_kz - gamma3_kz) * (2 * kx1 * mu_terms87[1] + 2 * ky * mu_terms87[0])  # NOTE: zz term discarded
+			gg87_k = (pp['gamma2'] + pp['gamma3']) * onez * (2 * kxky * gg_terms87[2] + (kx2 - ky2) * gg_terms87[3] + (-kx2 - ky2) * gg_terms87[4] / np.sqrt(3.)) + (gamma2_kz + gamma3_kz) * (2 * kx1 * gg_terms87[1] + 2 * ky * gg_terms87[0])  # NOTE: zz term discarded
+			kappa87_k = 0.5 * pp['dzkappa'] * (kx1 * (kappa_terms87['xz'] - kappa_terms87['zx']) + ky * (kappa_terms87['yz'] - kappa_terms87['zy'])) * onez
+
+			# extra terms from out-of-plane gauge field
+			mu78_k += 1j * onez * (pp['gamma2'] - pp['gamma3']) * eBz * av_y * dkx * mu_terms78[2]
+			gg78_k += 1j * onez * (pp['gamma2'] + pp['gamma3']) * eBz * av_y * dkx * gg_terms78[2]
+			mu87_k += 1j * onez * (pp['gamma2'] - pp['gamma3']) * eBz * av_y * dkx * mu_terms87[2]
+			gg87_k += 1j * onez * (pp['gamma2'] + pp['gamma3']) * eBz * av_y * dkx * gg_terms87[2]
+			if not ignore_magnxy:
+				# extra terms from in-plane gauge field
+				mu78_k += 1j * oney * (gamma2_av - gamma3_av) * (-eBy * mu_terms78[1] + eBx * mu_terms78[0])
+				gg78_k += 1j * oney * (gamma2_av + gamma3_av) * (-eBy * gg_terms78[1] + eBx * gg_terms78[0])
+				mu87_k += 1j * oney * (gamma2_av - gamma3_av) * (-eBy * mu_terms87[1] + eBx * mu_terms87[0])
+				gg87_k += 1j * oney * (gamma2_av + gamma3_av) * (-eBy * gg_terms87[1] + eBx * gg_terms87[0])
+
+			h78_mat = 3 * hbarm0 * (mu78_k + gg78_k + kappa78_k)
+			h87_mat = 3 * hbarm0 * (mu87_k + gg87_k + kappa87_k)
+			s2v1 = h78_mat[0,1]
+			s32stpd = -h78_mat[0,2]
+			s32stmd = -h78_mat[1,1]
+			s32stp = -h87_mat[2,0]
+			s32stm = -h87_mat[1,1]
+
+		return np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0,       0.0, -ps3 * km ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km, -ps3 * kp,       0.0 ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0,   sm / s2,  -s2 * rr ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr,      s2v1,   -s32stm ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd,   -s32stp,     -s2v1 ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p,  s2 * rrd,   sp / s2 ],
+			[           0.0,     -ps3 * km,       smd / s2,      s2v1, -s32stpd,       s2 * rr,        u1,        co ],
+			[     -ps3 * kp,           0.0,      -s2 * rrd,  -s32stmd,    -s2v1,      spd / s2,       cod,        u1 ]])
+	else:
+		return np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0 ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0 ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p ]])
+
+# H1 as function of (kx, ky, kz)
+def h1bulk(k, params, lattice_reg = False, axial = True, kterms = None):
+	"""Hamiltonian block H1(kx, ky, kz); remainder (all except pure kz terms).
+
+	Arguments:
+	k            Momentum [kx, ky, kz]; a 2-component input is padded with
+	             kz = 0 (needed for bulk_ll calculation in symbolic LL mode).
+	params       Parameter object; params.z(None) yields the bulk material
+	             parameters.
+	lattice_reg  If True, apply lattice regularization to kx, ky, kz.
+	axial        If True, use the axial approximation.
+	kterms       Dict with k-dependent matrices; required if the lattice is
+	             transformed by a matrix.
+
+	Returns:
+	hmat         Numpy array of shape (6, 6) or (8, 8), depending on
+	             params.norbitals.
+	"""
+	# Momenta
+	if isinstance(k, (list, tuple)) and len(k) == 2:  # Needed for bulk_ll calculation in symbolic LL mode
+		k = [k[0], k[1], 0.0]
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			kx, ky, kz = lattice_reg_transform(k, cc, params.lattice_trans)
+			kx2, ky2, kz2, kykz, kxkz, kxky = lattice_reg_transform(k, cc, params.lattice_trans, quadratic = True)
+		else:
+			kz = sin(cc * k[2]) / cc
+			kz2 = (1. - cos(cc * k[2])) * 2. / cc**2
+			kx = sin(cc * k[0]) / cc
+			kx2 = (1. - cos(cc * k[0])) * 2. / cc**2
+			ky = sin(cc * k[1]) / cc
+			ky2 = (1. - cos(cc * k[1])) * 2. / cc**2
+			kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	else:
+		kx, ky, kz = k[0], k[1], k[2]
+		kx2, ky2, kz2 = kx**2, ky**2, kz**2
+		kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	k2 = kx2 + ky2
+	kp = kx + 1.j * ky
+	km = kx - 1.j * ky
+	kp2 = kx2 - ky2 + 2.j * kxky
+	km2 = kx2 - ky2 - 2.j * kxky
+
+	# Matrix elements
+	pp = params.z(None)
+	# Bulk version: the z derivatives of kappa and gamma3 are set to zero
+	pp['dzkappa'] = 0.0
+	pp['dzgamma3'] = 0.0
+	ps2  = sqrt(1 / 2) * pp['P']
+	ps6  = sqrt(1 / 6) * pp['P']
+	hh   = 1
+	t1   = hbarm0 * (2 * pp['F'] + 1) * k2
+	rr   = 0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2'])  # axial term
+	rrd  = 0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] + pp['gamma2'])
+	if not axial:
+		rr   += -0.5 * kp2 * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2'])  # non-axial term
+		rrd  += -0.5 * km2 * hbarm0 * sqrt(3.) * (pp['gamma3'] - pp['gamma2'])
+	w1p  = -hbarm0 * (pp['gamma1'] + pp['gamma2']) * k2
+	w1m  = -hbarm0 * (pp['gamma1'] - pp['gamma2']) * k2
+	gamma3_kz = pp['gamma3'] * kz
+	sp   = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	spd  = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	sm   = -hbarm0 * sqrt(3.) * km * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	smd  = -hbarm0 * sqrt(3.) * kp * (2 * gamma3_kz)  # pp['dzgamma3'] = pp['dzkappa'] = 0
+	co   = 0   # pp['dzkappa'] = 0
+	cod  = 0   # pp['dzkappa'] = 0
+
+	if params.lattice_transformed_by_matrix():
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+		mu_terms = kterms['mu88']
+		gg_terms = kterms['gg88']
+
+		mu_k = kx2 * mu_terms['xx'] + ky2 * mu_terms['yy'] + kxky * mu_terms['xy'] + kxkz * mu_terms['xz'] + kykz * mu_terms['yz']
+		gg_k = kx2 * gg_terms['xx'] + ky2 * gg_terms['yy'] + kxky * gg_terms['xy'] + kxkz * gg_terms['xz'] + kykz * gg_terms['yz']
+		g1_k = np.diag([1,1,1,1]) * k2
+		h88_mat = hbarm0 * ((pp['gamma2'] - pp['gamma3']) * mu_k + (pp['gamma2'] + pp['gamma3']) * gg_k - pp['gamma1'] * g1_k)
+		# Extract the matrix elements from the transformed 4x4 Gamma8 block,
+		# replacing the untransformed values computed above.
+		w1p, w1m = h88_mat[0,0], h88_mat[1,1]
+		rr, rrd = h88_mat[0,2], h88_mat[2,0]
+		sp, spd, sm, smd = h88_mat[3,2], h88_mat[2,3], -h88_mat[0,1], -h88_mat[1,0]
+
+	if params.norbitals == 8:
+		# Additional matrix elements for the Gamma7 orbitals (8-orbital model)
+		ps3 = sqrt(1 / 3) * pp['P']
+		s2 = sqrt(2.)
+		u1 = -hbarm0 * pp['gamma1'] * k2
+		s2v1 = -s2 * hbarm0 * pp['gamma2'] * k2
+		s32stp   = sqrt(1.5) * sp   # sqrt(3/2) * Stilde_+
+		s32stpd  = sqrt(1.5) * spd  # sqrt(3/2) * Stilde_+^dagger
+		s32stm   = sqrt(1.5) * sm   # sqrt(3/2) * Stilde_-
+		s32stmd  = sqrt(1.5) * smd  # sqrt(3/2) * Stilde_-^dagger
+		if params.lattice_transformed_by_matrix():
+			if kterms is None:
+				raise ValueError("Transformation requires argument kterms to be defined")
+			mu_terms78 = kterms['mu78']
+			gg_terms78 = kterms['gg78']
+			mu78_k = (pp['gamma2'] - pp['gamma3']) * (2 * kykz * mu_terms78[0] + 2 * kxkz * mu_terms78[1] + 2 * kxky * mu_terms78[2] + (kx2 - ky2) * mu_terms78[3] + (-kx2 - ky2) * mu_terms78[4] / np.sqrt(3.))  # NOTE: zz term discarded
+			gg78_k = (pp['gamma2'] + pp['gamma3']) * (2 * kykz * gg_terms78[0] + 2 * kxkz * gg_terms78[1] + 2 * kxky * gg_terms78[2] + (kx2 - ky2) * gg_terms78[3] + (-kx2 - ky2) * gg_terms78[4] / np.sqrt(3.))  # NOTE: zz term discarded
+			h78_mat = 3 * hbarm0 * (mu78_k + gg78_k)
+			s2v1 = h78_mat[0,1]
+			s32stpd = -h78_mat[0,2]
+			s32stmd = -h78_mat[1,1]
+			# Here, s32stp and s32stm follow from complex conjugation of the
+			# corresponding h78_mat entries (cf. h87_mat in h1zy / h1zy_magn)
+			s32stp = -np.conjugate(h78_mat[0,2])
+			s32stm = -np.conjugate(h78_mat[1,1])
+
+		hmat = np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0,       0.0, -ps3 * km ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km, -ps3 * kp,       0.0 ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0,   sm / s2,  -s2 * rr ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr,      s2v1,   -s32stm ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd,   -s32stp,     -s2v1 ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p,  s2 * rrd,   sp / s2 ],
+			[           0.0,     -ps3 * km,       smd / s2,      s2v1, -s32stpd,       s2 * rr,        u1,        co ],
+			[     -ps3 * kp,           0.0,      -s2 * rrd,  -s32stmd,    -s2v1,      spd / s2,       cod,        u1 ]])
+	else:
+		hmat = np.array([
+			[            t1,           0.0, -hh * ps2 * kp,       0.0, ps6 * km,           0.0 ],
+			[           0.0,            t1,            0.0, -ps6 * kp,      0.0, hh * ps2 * km ],
+			[-hh * ps2 * km,           0.0,            w1p,       -sm,       rr,           0.0 ],
+			[           0.0,     -ps6 * km,           -smd,       w1m,       co,            rr ],
+			[      ps6 * kp,           0.0,            rrd,       cod,      w1m,           spd ],
+			[           0.0, hh * ps2 * kp,            0.0,       rrd,       sp,           w1p ]])
+	return hmat
+
+def hbia_bulk(k, params, lattice_reg = False, kterms = None):
+	"""Bulk inversion asymmetric terms block Hbia(kx, ky, kz)
+
+	Arguments:
+	k            Momentum [kx, ky, kz].
+	params       Parameter object; params.z(None) yields the bulk material
+	             parameters (bia_b8m, bia_b8p, bia_b7, bia_c).
+	lattice_reg  If True, apply lattice regularization to kx, ky, kz.
+	kterms       Dict with k-dependent matrices (keys 'bia_c88', 'bia_b8p',
+	             'bia_b8m', and for 8 orbitals also 'bia_b7', 'bia_c87');
+	             required if the lattice is transformed by a matrix.
+
+	Returns:
+	hmat         Numpy array of shape (6, 6) or (8, 8), depending on
+	             params.norbitals.
+	"""
+	# Momenta
+	if lattice_reg:
+		cc = params.a_lattice
+		if params.lattice_transformed_by_matrix():
+			kx, ky, kz = lattice_reg_transform(k, cc, params.lattice_trans)
+			kx2, ky2, kz2, kykz, kxkz, kxky = lattice_reg_transform(k, cc, params.lattice_trans, quadratic = True)
+		else:
+			kz = sin(cc * k[2]) / cc
+			kz2 = (1. - cos(cc * k[2])) * 2. / cc**2
+			kx = sin(cc * k[0]) / cc
+			kx2 = (1. - cos(cc * k[0])) * 2. / cc**2
+			ky = sin(cc * k[1]) / cc
+			ky2 = (1. - cos(cc * k[1])) * 2. / cc**2
+			kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	else:
+		kx, ky, kz = k[0], k[1], k[2]
+		kx2, ky2, kz2 = kx**2, ky**2, kz**2
+		kxky, kxkz, kykz = kx * ky, kx * kz, ky * kz
+	k2 = kx2 + ky2
+	kk = kx2 - ky2
+	kp = kx + 1.j * ky
+	km = kx - 1.j * ky
+	kpkz = kxkz + 1.j * kykz
+	kmkz = kxkz - 1.j * kykz
+
+	# Matrix elements built from the BIA coefficients
+	pp = params.z(None)
+	bp = sqrt(1./6.)  * (pp['bia_b8m'] * kk + 2.j * pp['bia_b8p'] * kxky)
+	bm = sqrt(1./6.)  * (pp['bia_b8m'] * kk - 2.j * pp['bia_b8p'] * kxky)
+	bh = sqrt(1./18.) * pp['bia_b8m'] * (k2 - 2 * kz2)
+	bpz = pp['bia_b8p'] * kpkz
+	bmz = pp['bia_b8p'] * kmkz
+	os2 = 1./sqrt(2.)
+	os6 = 1./sqrt(6.)
+	hs3 = sqrt(3.) / 2.
+	hf = 0.5
+	cp = pp['bia_c'] * kp
+	cm = pp['bia_c'] * km
+	cz = pp['bia_c'] * kz
+
+	if params.norbitals == 8:
+		b7p = sqrt(1./3.) * pp['bia_b7'] * kpkz
+		b7m = sqrt(1./3.) * pp['bia_b7'] * kmkz
+		b7i = 1.j * sqrt(1./3.) * pp['bia_b7'] * kxky
+		s12 = sqrt(1./2.)
+		s18 = 0.5 * s12
+		s38 = sqrt(3./8.)
+		hmat = np.array([
+			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh,   -b7i,    -b7p],
+			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz,    b7m,     b7i],
+			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm, s18*cp,  s12*cz],
+			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz,  0.0,   -s38*cp],
+			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp, s38*cm,     0.0],
+			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0, s12*cz, -s18*cm],
+			[    b7i,     b7p,  s18*cm,     0.0,  s38*cp,  s12*cz,    0.0,     0.0],
+			[   -b7m,    -b7i,  s12*cz, -s38*cm,     0.0, -s18*cp,    0.0,     0.0]])
+	else:
+		hmat = np.array([
+			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh],
+			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz],
+			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm],
+			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz],
+			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp],
+			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0]])
+
+	if params.lattice_transformed_by_matrix():
+		if kterms is None:
+			raise ValueError("Transformation requires argument kterms to be defined")
+
+		c88_terms = kterms['bia_c88']
+		c88_k = (pp['bia_c'] / np.sqrt(3)) * (c88_terms['x'] * kx + c88_terms['y'] * ky + c88_terms['z'] * kz)
+
+		b8p_terms = kterms['bia_b8p']
+		b8p_k = (pp['bia_b8p'] * 0.5j * np.sqrt(3)) * (b8p_terms['xy'] * kxky + b8p_terms['xz'] * kxkz + b8p_terms['yz'] * kykz + b8p_terms['xx'] * kx2 + b8p_terms['yy'] * ky2 + b8p_terms['zz'] * kz2)
+
+		## Basis for Tij and quadratic k forms in 5-dimensional representation.
+		k5basis = [2 * kykz, 2 * kxkz, 2 * kxky, kx2 - ky2, (2 * kz2 - kx2 - ky2) / np.sqrt(3.)]
+
+		b8m_terms = kterms['bia_b8m']
+		b8m_k = -0.5 * pp['bia_b8m'] * np.sum([k5basis[j] * b8m_terms[j] for j in range(0, 5)], axis = 0)
+		# NOTE(review): np.asmatrix (here and below) relies on the deprecated
+		# np.matrix class; only .conjugate().transpose() and slice assignment
+		# are used, so np.asarray should behave identically — consider migrating.
+		b8_k = np.asmatrix(b8p_k + b8m_k)
+
+		hmat1 = np.zeros((params.norbitals, params.norbitals), dtype = complex)
+		hmat1[2:6, 2:6] = c88_k
+		hmat1[0:2, 2:6] = b8_k
+		hmat1[2:6, 0:2] = b8_k.conjugate().transpose()
+
+		if params.norbitals == 8:
+			b7_terms = kterms['bia_b7']
+			b7_k = np.asmatrix((pp['bia_b7'] * -0.5j / np.sqrt(3)) * (b7_terms['xy'] * kxky + b7_terms['xz'] * kxkz + b7_terms['yz'] * kykz + b7_terms['xx'] * kx2 + b7_terms['yy'] * ky2 + b7_terms['zz'] * kz2))
+
+			c87_terms = kterms['bia_c87']
+			c87_k = np.asmatrix((pp['bia_c'] * 0.5j * np.sqrt(3)) * (c87_terms[0] * kx + c87_terms[1] * ky + c87_terms[2] * kz))
+
+			hmat1[0:2, 6:8] = b7_k
+			hmat1[6:8, 0:2] = b7_k.conjugate().transpose()
+			hmat1[6:8, 2:6] = c87_k
+			hmat1[2:6, 6:8] = c87_k.conjugate().transpose()
+		# The transformed matrix replaces the untransformed hmat entirely
+		hmat = hmat1
+	return hmat
+
def hz_bia(z, dz, k, params, lattice_reg = False, magn = None, ignore_magnxy = False, kterms = None):
	"""Bulk inversion asymmetric terms block Hbia(kx, ky, z). In-plane magnetic fields included. No Bz!
	There is no separate version without in-plane magnetic fields.

	Arguments:
	z              Position (z index); fractional values are permitted.
	dz             Offset between row and column z index; the values -1, 0, 1
	               select the corresponding finite-difference contribution.
	k              Momentum [kx, ky].
	params         Physics parameters object; provides material parameters as
	               function of z and the discretization constants.
	lattice_reg    If True, apply lattice regularization (sin/cos forms of k).
	magn           Magnetic field; Vector, 3-tuple, or a number (then
	               interpreted as Bz). Note that Bz is ignored by design.
	ignore_magnxy  If True, do not include the gauge field from the in-plane
	               components Bx, By.
	kterms         Transformed BIA k terms; required if the lattice has been
	               transformed by a matrix, otherwise unused.

	Returns:
	hmat   Numpy array of shape (norbitals, norbitals); the BIA block for this
	       (z, dz) pair.
	"""
	# Momenta
	one = (1 if dz == 0 else 0)  # for diagonal terms
	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
	kz2_p = params.c_dz2 * ( 1 if dz ==  1 else -1 if dz == 0 else 0)
	kz2_m = params.c_dz2 * ( 1 if dz == -1 else -1 if dz == 0 else 0)
	# the derivatives are split for proper symmetrization under hermitian conjugation

	# Decompose the magnetic field argument into cartesian components
	magn = 0.0 if magn is None else magn
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
	elif isinstance(magn, tuple) and len(magn) == 3:
		bx, by, bz = magn
	elif isinstance(magn, (int, float, np.integer, np.floating)):
		bx, by, bz = 0, 0, magn
	else:
		raise TypeError("Invalid type for variable magn")

	magnxy = not ignore_magnxy and (abs(bx) > 1e-9 or abs(by) > 1e-9)  # do we consider in-plane magnetic field
	if magnxy:  # in-plane field
		# Peierls substitution:
		#   kx -> kx + eAx, ky -> ky + eAy, kz -> kz + eAz
		# with:
		#   eAx = (e B / hbar) * ( by * z)
		#   eAy = (e B / hbar) * (-bx * z)
		#   eAz = 0
		# Note that bz is ignored, by design!
		# In this geometry, we can simply shift the momenta kx, ky. (This is
		# not possible if Bz != 0, see h1zy_magn.) Note however the k+ kz and
		# k- kz terms in sp, spd, sm, smd.
		# The lattice constant zres is included, because z is just an index
		z0 = (params.nz - 1) * 0.5
		eAx = eoverhbar * by * params.zres * (z - z0)
		eAy = -eoverhbar * bx * params.zres * (z - z0)
		eBx = eoverhbar * bx
		eBy = eoverhbar * by
		eBp = eoverhbar * (bx + 1.j * by)
		eBm = eoverhbar * (bx - 1.j * by)
		k = [k[0] + eAx, k[1] + eAy]

	# In-plane momenta, lattice-regularized if requested
	if lattice_reg:
		cc = params.a_lattice
		if params.lattice_transformed_by_matrix():
			kx, ky = lattice_reg_transform(k, cc, params.lattice_trans)
			kx2, ky2, kxky = lattice_reg_transform(k, cc, params.lattice_trans, quadratic = True)
		else:
			kx = sin(cc * k[0]) / cc
			kx2 = (1. - cos(cc * k[0])) * 2. / cc**2
			ky = sin(cc * k[1]) / cc
			ky2 = (1. - cos(cc * k[1])) * 2. / cc**2
			kxky = kx * ky
	else:
		kx, ky = k[0], k[1]
		kx2, ky2 = kx**2, ky**2
		kxky = kx * ky
	k2 = kx2 + ky2
	kk = kx2 - ky2
	kp = kx + 1.j * ky
	km = kx - 1.j * ky

	# Matrix elements
	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
	pp_0 = params.z(z + 0)
	pp_m = params.z(z - 0.5)

	# BIA terms built from the material parameters bia_b8p, bia_b8m, bia_c
	# (and bia_b7 for 8 orbitals); pp_p/pp_m enter the symmetrized kz products.
	bp = sqrt(1./6.)  * (pp_0['bia_b8m'] * kk + 2.j * pp_0['bia_b8p'] * kxky) * one
	bm = sqrt(1./6.)  * (pp_0['bia_b8m'] * kk - 2.j * pp_0['bia_b8p'] * kxky) * one
	bh = sqrt(1./18.) * (pp_0['bia_b8m'] * k2 * one - 2 * kz2_p * pp_p['bia_b8m'] - 2 * kz2_m * pp_m['bia_b8m'])
	bpz = (pp_p['bia_b8p'] * kz_p + pp_m['bia_b8p'] * kz_m) * kp
	bmz = (pp_p['bia_b8p'] * kz_p + pp_m['bia_b8p'] * kz_m) * km
	if magnxy:
		# extra terms from in-plane gauge field
		av_zp = (1 if dz ==  1 else 0)
		av_zm = (1 if dz == -1 else 0)
		b8p_av = 0.5 * (pp_p['bia_b8p'] * av_zp + pp_m['bia_b8p'] * av_zm)
		bpz += 0.5 * b8p_av * -eBp
		bmz += 0.5 * b8p_av * eBm

	# Numerical shorthands used in the matrices below
	os2 = 1./sqrt(2.)
	os6 = 1./sqrt(6.)
	hs3 = sqrt(3.) / 2.
	hf = 0.5
	cp = pp_0['bia_c'] * kp * one
	cm = pp_0['bia_c'] * km * one
	cz = pp_p['bia_c'] * kz_p + pp_m['bia_c'] * kz_m

	if params.norbitals == 8:
		b7p = sqrt(1./3.) * (pp_p['bia_b7'] * kz_p + pp_m['bia_b7'] * kz_m) * kp
		b7m = sqrt(1./3.) * (pp_p['bia_b7'] * kz_p + pp_m['bia_b7'] * kz_m) * km
		b7i = 1.j * sqrt(1./3.) * pp_0['bia_b7'] * kxky * one
		if magnxy:
			# extra terms from in-plane gauge field
			b7_av = 0.5 * (pp_p['bia_b7'] * av_zp + pp_m['bia_b7'] * av_zm)
			b7p += 0.5 * sqrt(1./3.) * b7_av * -eBp
			b7m += 0.5 * sqrt(1./3.) * b7_av * eBm

		s12 = sqrt(1./2.)
		s18 = 0.5 * s12
		s38 = sqrt(3./8.)
		hmat = np.array([
			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh,   -b7i,    -b7p],
			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz,    b7m,     b7i],
			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm, s18*cp,  s12*cz],
			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz,  0.0,   -s38*cp],
			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp, s38*cm,     0.0],
			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0, s12*cz, -s18*cm],
			[    b7i,     b7p,  s18*cm,     0.0,  s38*cp,  s12*cz,    0.0,     0.0],
			[   -b7m,    -b7i,  s12*cz, -s38*cm,     0.0, -s18*cp,    0.0,     0.0]])
	else:
		hmat = np.array([
			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh],
			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz],
			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm],
			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz],
			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp],
			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0]])

	# If the lattice is transformed by a matrix, rebuild the blocks from the
	# precomputed (transformed) k terms in kterms instead.
	if params.lattice_transformed_by_matrix():
		if kterms is None:
			raise ValueError("Transformation requires argument kterms to be defined")

		c88_terms = kterms['bia_c88']
		c88_k_p = (pp_p['bia_c'] / np.sqrt(3)) * (c88_terms['z'] * kz_p)
		c88_k_0 = (pp_0['bia_c'] / np.sqrt(3)) * one * (c88_terms['x'] * kx + c88_terms['y'] * ky)
		c88_k_m = (pp_m['bia_c'] / np.sqrt(3)) * (c88_terms['z'] * kz_m)
		c88_k = c88_k_p + c88_k_0 + c88_k_m

		b8p_terms = kterms['bia_b8p']
		b8p_termsH = kterms['bia_b8pH']  # hermitian conjugate
		# NOTE: We cannot just conjugate the result, since that will also conjugate
		# the derivatives. First order derivatives would pick up an undesired sign
		# under conjugation.
		b68p_k_p = (pp_p['bia_b8p'] * 0.5j * np.sqrt(3)) * (b8p_terms['xz'] * kx * kz_p + b8p_terms['yz'] * ky * kz_p + b8p_terms['zz'] * kz2_p)
		b68p_k_0 = (pp_0['bia_b8p'] * 0.5j * np.sqrt(3)) * one * (b8p_terms['xy'] * kxky + b8p_terms['xx'] * kx2 + b8p_terms['yy'] * ky2)
		b68p_k_m = (pp_m['bia_b8p'] * 0.5j * np.sqrt(3)) * (b8p_terms['xz'] * kx * kz_m + b8p_terms['yz'] * ky * kz_m + b8p_terms['zz'] * kz2_m)
		b86p_k_p = (pp_p['bia_b8p'] * -0.5j * np.sqrt(3)) * (b8p_termsH['xz'] * kx * kz_p + b8p_termsH['yz'] * ky * kz_p + b8p_termsH['zz'] * kz2_p)
		b86p_k_0 = (pp_0['bia_b8p'] * -0.5j * np.sqrt(3)) * one * (b8p_termsH['xy'] * kxky + b8p_termsH['xx'] * kx2 + b8p_termsH['yy'] * ky2)
		b86p_k_m = (pp_m['bia_b8p'] * -0.5j * np.sqrt(3)) * (b8p_termsH['xz'] * kx * kz_m + b8p_termsH['yz'] * ky * kz_m + b8p_termsH['zz'] * kz2_m)
		b68p_k = b68p_k_p + b68p_k_0 + b68p_k_m
		b86p_k = b86p_k_p + b86p_k_0 + b86p_k_m

		## Basis for quadratic k forms in 5-dimensional representation.
		# Split by finite-difference contribution (p/0/m), cf. kz_p, kz_m above.
		k5basis_p = [2 * ky * kz_p, 2 * kx * kz_p, 0, 0, (2 * kz2_p) / np.sqrt(3.)]
		k5basis_0 = [0, 0, 2 * kxky * one, (kx2 - ky2) * one, (-kx2 - ky2) * one / np.sqrt(3.)]
		k5basis_m = [2 * ky * kz_m, 2 * kx * kz_m, 0, 0, (2 * kz2_m) / np.sqrt(3.)]
		b8m_terms = kterms['bia_b8m']
		b8m_termsH = kterms['bia_b8mH']  # hermitian conjugate
		b68m_k = -0.5 * np.sum([(pp_p['bia_b8m'] * k5basis_p[j] + pp_0['bia_b8m'] * k5basis_0[j] + pp_m['bia_b8m'] * k5basis_m[j]) * b8m_terms[j] for j in range(0, 5)], axis = 0)
		b86m_k = -0.5 * np.sum([(pp_p['bia_b8m'] * k5basis_p[j] + pp_0['bia_b8m'] * k5basis_0[j] + pp_m['bia_b8m'] * k5basis_m[j]) * b8m_termsH[j] for j in range(0, 5)], axis = 0)
		b68_k = np.asmatrix(b68p_k + b68m_k)
		b86_k = np.asmatrix(b86p_k + b86m_k)

		if magnxy:
			# extra terms from in-plane gauge field
			b8p_av = 0.5 * (pp_p['bia_b8p'] * av_zp + pp_m['bia_b8p'] * av_zm)  # see also above
			b8m_av = 0.5 * (pp_p['bia_b8m'] * av_zp + pp_m['bia_b8m'] * av_zm)
			delta_b68p_k = 0.5j * np.sqrt(3) * (0.5j * b8p_av * (-eBy * b8p_terms['xz'] + eBx * b8p_terms['yz']))
			delta_b68m_k = -0.5j * b8m_av * (-eBy * b8m_terms['1'] + eBx * b8m_terms['0'])
			delta_b86p_k = -0.5j * np.sqrt(3) * (0.5j * b8p_av * (-eBy * b8p_termsH['xz'] + eBx * b8p_termsH['yz']))
			delta_b86m_k = -0.5j * b8m_av * (-eBy * b8m_termsH['1'] + eBx * b8m_termsH['0'])
			b68_k += np.asmatrix(delta_b68p_k + delta_b68m_k)
			b86_k += np.asmatrix(delta_b86p_k + delta_b86m_k)

		# Assemble the blocks: Gamma8 x Gamma8 and Gamma6 x Gamma8
		hmat1 = np.zeros((params.norbitals, params.norbitals), dtype = complex)
		hmat1[2:6, 2:6] = c88_k
		hmat1[0:2, 2:6] = b68_k
		hmat1[2:6, 0:2] = b86_k

		if params.norbitals == 8:
			b7_terms = kterms['bia_b7']  # matrices are hermitian
			b67_k_p = (pp_p['bia_b7'] * -0.5j / np.sqrt(3)) * (b7_terms['xz'] * kx * kz_p + b7_terms['yz'] * ky * kz_p + b7_terms['zz'] * kz2_p)
			b67_k_0 = (pp_0['bia_b7'] * -0.5j / np.sqrt(3)) * one * (b7_terms['xy'] * kxky + b7_terms['xx'] * kx2 + b7_terms['yy'] * ky2)
			b67_k_m = (pp_m['bia_b7'] * -0.5j / np.sqrt(3)) * (b7_terms['xz'] * kx * kz_m + b7_terms['yz'] * ky * kz_m + b7_terms['zz'] * kz2_m)
			b67_k = np.asmatrix(b67_k_p + b67_k_0 + b67_k_m)
			b76_k = -b67_k  # Note: b67_k^dagger = -b67_k

			if magnxy:
				# extra terms from in-plane gauge field
				b7_av = 0.5 * (pp_p['bia_b7'] * av_zp + pp_m['bia_b7'] * av_zm)  # see also above
				delta_b67_k = -0.5j / np.sqrt(3) * (0.5j * b7_av * (-eBy * b7_terms['xz'] + eBx * b7_terms['yz']))
				delta_b76_k = -delta_b67_k
				b67_k += np.asmatrix(delta_b67_k)
				b76_k += np.asmatrix(delta_b76_k)

			c87_terms = kterms['bia_c87']
			c87_termsH = kterms['bia_c87H']  # hermitian conjugate
			c78_k_p = (pp_p['bia_c'] * 0.5j * np.sqrt(3)) * (c87_terms[2] * kz_p)
			c78_k_0 = (pp_0['bia_c'] * 0.5j * np.sqrt(3)) * one * (c87_terms[0] * kx + c87_terms[1] * ky)
			c78_k_m = (pp_m['bia_c'] * 0.5j * np.sqrt(3)) * (c87_terms[2] * kz_m)
			c87_k_p = (pp_p['bia_c'] * -0.5j * np.sqrt(3)) * (c87_termsH[2] * kz_p)
			c87_k_0 = (pp_0['bia_c'] * -0.5j * np.sqrt(3)) * one * (c87_termsH[0] * kx + c87_termsH[1] * ky)
			c87_k_m = (pp_m['bia_c'] * -0.5j * np.sqrt(3)) * (c87_termsH[2] * kz_m)
			c78_k = np.asmatrix(c78_k_p + c78_k_0 + c78_k_m)
			c87_k = np.asmatrix(c87_k_p + c87_k_0 + c87_k_m)

			hmat1[0:2, 6:8] = b67_k
			hmat1[6:8, 0:2] = b76_k
			hmat1[6:8, 2:6] = c78_k
			hmat1[2:6, 6:8] = c87_k
		hmat = hmat1
	return hmat
+
def hzy_bia(z, dz, y, dy, k, params, boundary = 0, lattice_reg = False, gauge_zero = 0.0, magn = None, ignore_magnxy = False, kterms = None):
	"""Bulk inversion asymmetric terms block Hbia(kx, y, z).
	Magnetic fields included. There is no separate version without magnetic fields.

	Arguments:
	z              Position (z index); fractional values are permitted.
	dz             Offset between row and column z index (-1, 0, or 1).
	y              Position (y index).
	dy             Offset between row and column y index (-1, 0, or 1).
	k              Momentum kx, or list whose first element is kx.
	params         Physics parameters object.
	boundary       -1, 0, or 1; whether the site is at the lower edge, in the
	               bulk, or at the upper edge of the y range.
	lattice_reg    If True, apply lattice regularization (sin/cos forms of k).
	gauge_zero     Shift of the gauge origin in the y direction.
	magn           Magnetic field; Vector, 3-tuple, or number (interpreted
	               as Bz).
	ignore_magnxy  If True, ignore the in-plane components (Bx, By).
	kterms         Transformed BIA k terms; required if the lattice has been
	               transformed by a matrix, otherwise unused.

	Returns:
	hmat   Numpy array of shape (norbitals, norbitals); the BIA block for this
	       (z, dz, y, dy) combination.
	"""
	magn = 0.0 if magn is None else magn
	bz = magn.z() if isinstance(magn, Vector) else magn[2] if isinstance(magn, tuple) and len(magn) == 3 else magn  # z component
	if ignore_magnxy:
		bx, by = 0, 0
	else:
		bx = magn.x() if isinstance(magn, Vector) else magn[0] if isinstance(magn, tuple) and len(magn) == 3 else 0  # x component
		by = magn.y() if isinstance(magn, Vector) else magn[1] if isinstance(magn, tuple) and len(magn) == 3 else 0  # y component

	if isinstance(k, list):
		kx0 = k[0]
	else:
		kx0 = k

	# Peierls substitution:
	#   kx -> kx + eAx, ky -> ky + eAy, kz -> kz + eAz
	# with:
	#   eAx = (e B / hbar) * (-bz * y + by * z)
	#   eAy = (e B / hbar) * (-bx * z)
	#   eAz = 0
	# The lattice constant yres is included, because y is just an index
	y0 = params.ymid * (1.0 + gauge_zero)
	eBz = eoverhbar * bz
	if ignore_magnxy:
		eAx = -eoverhbar * bz * params.yres * (y - y0)
		eAy = 0
	else:
		z0 = (params.nz - 1) * 0.5
		eAx = -eoverhbar * bz * params.yres * (y - y0) + eoverhbar * by * params.zres * (z - z0)
		eAy = -eoverhbar * bx * params.zres * (z - z0)
		eBp = eoverhbar * (bx + 1.j * by)
		eBm = eoverhbar * (bx - 1.j * by)
		eBx = eoverhbar * bx
		eBy = eoverhbar * by

	if lattice_reg:
		cc = params.a_lattice
		if params.lattice_transformed_by_matrix():
			# NOTE(review): kx0 is overwritten before kx02 is computed, so the
			# quadratic form uses the already-regularized kx0 — confirm this
			# reuse is intended (see TODO below).
			kx0 = lattice_reg_transform(kx0, cc, params.lattice_trans)
			kx02 = lattice_reg_transform(kx0, cc, params.lattice_trans, quadratic = True)
			kx = lattice_reg_transform(kx0 + eAx, cc, params.lattice_trans)
			kx2 = lattice_reg_transform(kx0 + eAx, cc, params.lattice_trans, quadratic = True)
			# TODO:
			dkx = cos(cc * (kx0 + eAx))
		else:
			kx = sin(cc * (kx0 + eAx)) / cc
			kx2 = (1. - cos(cc * (kx0 + eAx))) * 2. / cc**2
			kx02 = (1. - cos(cc * kx0)) * 2. / cc**2
			dkx = cos(cc * (kx0 + eAx))
	else:
		kx = kx0 + eAx
		kx2 = kx**2
		kx02 = kx0**2
		dkx = 1.

	# Momenta
	onez = (1 if dz == 0 else 0)  # for diagonal terms
	oney = (1 if dy == 0 else 0)  # for diagonal terms
	ddy   =  1 if dy == 1 else -1 if dy == -1 else 0  # first
	av_y = 0.5 if dy == 1 or dy == -1 else 0  # for use in kp2, km2
	if boundary == 0:     # not at an edge
		d2dy2 = -2 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
	elif boundary ==  1 or boundary == -1:   # at upper/lower edge
		d2dy2 = -1 if dy == 0 else 1 if (dy == 1 or dy == -1) else 0
	else:
		# Fix: the message referred to 'h1zy_magn'; this function is hzy_bia.
		# Use sys.exit instead of the site-module builtin exit().
		sys.stderr.write("ERROR (hzy_bia): Boundary number should be -1,0,1\n")
		sys.exit(1)
	# print ("(%2i, %2i): %2i" % (y, y+dy, d2dy2))

	kz_p = params.c_dz  * ( 1 if dz ==  1 else 0)
	kz_m = params.c_dz  * (-1 if dz == -1 else 0)
	kz2_p = params.c_dz2 * ( 1 if dz ==  1 else -1 if dz == 0 else 0)
	kz2_m = params.c_dz2 * ( 1 if dz == -1 else -1 if dz == 0 else 0)

	# ky, ky^2, k_+, k_-, k^2 = kx^2 + ky^2, kk = kx^2 - ky^2, kxky = kx ky
	if ignore_magnxy:
		ky = params.c_dy * ddy
		ky2 = params.c_dy2 * d2dy2
	else:
		ky = params.c_dy * ddy + oney * eAy
		ky2 = params.c_dy2 * d2dy2 + 2 * params.c_dy * ddy * eAy + oney * eAy**2
	kp  = oney * kx + 1.j * ky
	km  = oney * kx - 1.j * ky
	k2  = oney * kx2 + ky2
	kk  = oney * kx2 - ky2
	kxky = kx * ky  # + 0.5j * eBz * av_y * dkx
	kp2 = oney * kx2 + 2.j * kxky - ky2
	km2 = oney * kx2 - 2.j * kxky - ky2
	# include oney in kx (-> kx1) and kx2
	kx1 = oney * kx
	kx2 = oney * kx2

	# strip orientation; note: additional unitary transformation at the end
	if isinstance(params.lattice_trans, (int, float, np.integer, np.floating)) and np.abs(params.lattice_trans) > 1e-6:
		phi = params.lattice_trans * np.pi / 180.
		kp *= np.exp( 1.j * phi)
		km *= np.exp(-1.j * phi)
		kk, kxky = np.cos(2 * phi) * kk - np.sin(2 * phi) * 2 * kxky, 0.5 * np.sin(2 * phi) * kk + np.cos(2 * phi) * kxky
		if not ignore_magnxy:
			eBp *= np.exp( 1.j * phi)
			eBm *= np.exp(-1.j * phi)

	# Matrix elements
	pp_p = params.z(z + 0.5)  # fractional coordinates are perfectly fine
	pp_0 = params.z(z + 0)
	pp_m = params.z(z - 0.5)

	bp = sqrt(1./6.)  * (pp_0['bia_b8m'] * kk + 2.j * pp_0['bia_b8p'] * kxky) * onez
	bm = sqrt(1./6.)  * (pp_0['bia_b8m'] * kk - 2.j * pp_0['bia_b8p'] * kxky) * onez
	bh = sqrt(1./18.) * (pp_0['bia_b8m'] * k2 * onez - 2 * (kz2_p * pp_p['bia_b8m'] + kz2_m * pp_m['bia_b8m']) * oney)
	bpz = (pp_p['bia_b8p'] * kz_p + pp_m['bia_b8p'] * kz_m) * kp
	bmz = (pp_p['bia_b8p'] * kz_p + pp_m['bia_b8p'] * kz_m) * km
	# Effect of the magnetic field:
	bp += sqrt(1./6.) * onez * pp_0['bia_b8p'] * -eBz * av_y * dkx
	bm += -sqrt(1./6.) * onez * pp_0['bia_b8p'] * -eBz * av_y * dkx
	if not ignore_magnxy:
		# extra terms from in-plane gauge field
		av_zp = (1 if dz ==  1 else 0)
		av_zm = (1 if dz == -1 else 0)
		b8p_av = 0.5 * (pp_p['bia_b8p'] * av_zp + pp_m['bia_b8p'] * av_zm)
		bpz += 0.5 * b8p_av * oney * -eBp
		bmz += 0.5 * b8p_av * oney * eBm

	os2 = 1./sqrt(2.)
	os6 = 1./sqrt(6.)
	hs3 = sqrt(3.) / 2.
	hf = 0.5
	cp = pp_0['bia_c'] * kp * onez
	cm = pp_0['bia_c'] * km * onez
	cz = (pp_p['bia_c'] * kz_p + pp_m['bia_c'] * kz_m) * oney

	if params.norbitals == 8:
		b7p = sqrt(1./3.) * (pp_p['bia_b7'] * kz_p + pp_m['bia_b7'] * kz_m) * kp
		b7m = sqrt(1./3.) * (pp_p['bia_b7'] * kz_p + pp_m['bia_b7'] * kz_m) * km
		b7i = 1.j * sqrt(1./3.) * pp_0['bia_b7'] * kxky * onez
		# Effect of the magnetic field:
		b7i += 0.5 * sqrt(1./3.) * onez * pp_0['bia_b7'] * -eBz * av_y * dkx
		if not ignore_magnxy:
			# extra terms from in-plane gauge field
			b7_av = 0.5 * (pp_p['bia_b7'] * av_zp + pp_m['bia_b7'] * av_zm)
			b7p += 0.5 * sqrt(1./3.) * oney * b7_av * -eBp
			b7m += 0.5 * sqrt(1./3.) * oney * b7_av * eBm

		s12 = sqrt(1./2.)
		s18 = 0.5 * s12
		s38 = sqrt(3./8.)
		hmat = np.array([
			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh,   -b7i,    -b7p],
			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz,    b7m,     b7i],
			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm, s18*cp,  s12*cz],
			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz,  0.0,   -s38*cp],
			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp, s38*cm,     0.0],
			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0, s12*cz, -s18*cm],
			[    b7i,     b7p,  s18*cm,     0.0,  s38*cp,  s12*cz,    0.0,     0.0],
			[   -b7m,    -b7i,  s12*cz, -s38*cm,     0.0, -s18*cp,    0.0,     0.0]])
	else:
		hmat = np.array([
			[    0.0,     0.0, os2*bmz,      bp, os6*bpz,      bh],
			[    0.0,     0.0,     -bh, os6*bmz,     -bm, os2*bpz],
			[os2*bpz,     -bh,     0.0,  -hf*cp,      cz, -hs3*cm],
			[     bm, os6*bpz,  -hf*cm,     0.0,  hs3*cp,     -cz],
			[os6*bmz,     -bp,      cz,  hs3*cm,     0.0,  -hf*cp],
			[     bh, os2*bmz, -hs3*cp,     -cz,  -hf*cm,     0.0]])
	if isinstance(params.lattice_trans, (int, float, np.integer, np.floating)) and np.abs(params.lattice_trans) > 1e-6:
		jzval = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5, 0.5, -0.5])[:params.norbitals]
		u = np.diag(np.exp(-1.j * jzval * phi))
		ud = np.diag(np.exp(1.j * jzval * phi))
		# Apply the unitary basis transformation U^dagger H U, where U is the
		# diagonal matrix of phases exp(-i jz phi). Fix: use matrix products
		# (@), not '*'; hmat, u, ud are ndarrays, for which '*' multiplies
		# elementwise and would erase all off-diagonal entries of hmat.
		return ud @ (hmat @ u)
	elif params.lattice_transformed_by_matrix():
		if kterms is None:
			raise ValueError("Transformation requires argument kterms to be defined")

		c88_terms = kterms['bia_c88']
		c88_k_p = (pp_p['bia_c'] / np.sqrt(3)) * oney * (c88_terms['z'] * kz_p)
		c88_k_0 = (pp_0['bia_c'] / np.sqrt(3)) * onez * (c88_terms['x'] * kx1 + c88_terms['y'] * ky)
		c88_k_m = (pp_m['bia_c'] / np.sqrt(3)) * oney * (c88_terms['z'] * kz_m)
		c88_k = c88_k_p + c88_k_0 + c88_k_m

		b8p_terms = kterms['bia_b8p']
		b8p_termsH = kterms['bia_b8pH']  # hermitian conjugate
		# NOTE: We cannot just conjugate the result, since that will also conjugate
		# the derivatives. First order derivatives would pick up an undesired sign
		# under conjugation.
		b68p_k_p = (pp_p['bia_b8p'] * 0.5j * np.sqrt(3)) * (b8p_terms['xz'] * kx1 * kz_p + b8p_terms['yz'] * ky * kz_p + b8p_terms['zz'] * kz2_p)
		b68p_k_0 = (pp_0['bia_b8p'] * 0.5j * np.sqrt(3)) * onez * (b8p_terms['xy'] * kxky + b8p_terms['xx'] * kx2 + b8p_terms['yy'] * ky2)
		b68p_k_m = (pp_m['bia_b8p'] * 0.5j * np.sqrt(3)) * (b8p_terms['xz'] * kx1 * kz_m + b8p_terms['yz'] * ky * kz_m + b8p_terms['zz'] * kz2_m)
		b86p_k_p = (pp_p['bia_b8p'] * -0.5j * np.sqrt(3)) * (b8p_termsH['xz'] * kx1 * kz_p + b8p_termsH['yz'] * ky * kz_p + b8p_termsH['zz'] * kz2_p)
		b86p_k_0 = (pp_0['bia_b8p'] * -0.5j * np.sqrt(3)) * onez * (b8p_termsH['xy'] * kxky + b8p_termsH['xx'] * kx2 + b8p_termsH['yy'] * ky2)
		b86p_k_m = (pp_m['bia_b8p'] * -0.5j * np.sqrt(3)) * (b8p_termsH['xz'] * kx1 * kz_m + b8p_termsH['yz'] * ky * kz_m + b8p_termsH['zz'] * kz2_m)
		b68p_k = b68p_k_p + b68p_k_0 + b68p_k_m
		b86p_k = b86p_k_p + b86p_k_0 + b86p_k_m

		## Basis for quadratic k forms in 5-dimensional representation.
		k5basis_p = [2 * ky * kz_p, 2 * kx1 * kz_p, 0, 0, oney * (2 * kz2_p) / np.sqrt(3.)]
		k5basis_0 = [0, 0, 2 * kxky * onez, (kx2 - ky2) * onez, (-kx2 - ky2) * onez / np.sqrt(3.)]
		k5basis_m = [2 * ky * kz_m, 2 * kx1 * kz_m, 0, 0, oney * (2 * kz2_m) / np.sqrt(3.)]
		b8m_terms = kterms['bia_b8m']
		b8m_termsH = kterms['bia_b8mH']  # hermitian conjugate
		b68m_k = -0.5 * np.sum([(pp_p['bia_b8m'] * k5basis_p[j] + pp_0['bia_b8m'] * k5basis_0[j] + pp_m['bia_b8m'] * k5basis_m[j]) * b8m_terms[j] for j in range(0, 5)], axis = 0)
		b86m_k = -0.5 * np.sum([(pp_p['bia_b8m'] * k5basis_p[j] + pp_0['bia_b8m'] * k5basis_0[j] + pp_m['bia_b8m'] * k5basis_m[j]) * b8m_termsH[j] for j in range(0, 5)], axis = 0)
		b68_k = np.asmatrix(b68p_k + b68m_k)
		b86_k = np.asmatrix(b86p_k + b86m_k)

		# extra terms from out-of-plane gauge field
		delta_b68p_k = 0.5j * np.sqrt(3) * (0.5j * onez * pp_0['bia_b8p'] * eBz * av_y * dkx * b8p_terms['xy'])
		delta_b68m_k = -0.5j * onez * pp_0['bia_b8m'] * eBz * av_y * dkx * b8m_terms['2']
		delta_b86p_k = -0.5j * np.sqrt(3) * (0.5j * onez * pp_0['bia_b8p'] * eBz * av_y * dkx * b8p_termsH['xy'])
		delta_b86m_k = -0.5j * onez * pp_0['bia_b8m'] * eBz * av_y * dkx * b8m_termsH['2']
		if not ignore_magnxy:
			# extra terms from in-plane gauge field
			b8p_av = 0.5 * (pp_p['bia_b8p'] * av_zp + pp_m['bia_b8p'] * av_zm)  # see also above
			b8m_av = 0.5 * (pp_p['bia_b8m'] * av_zp + pp_m['bia_b8m'] * av_zm)
			delta_b68p_k += 0.5j * np.sqrt(3) * (0.5j * oney * b8p_av * (-eBy * b8p_terms['xz'] + eBx * b8p_terms['yz']))
			delta_b68m_k += -0.5j * oney * b8m_av * (-eBy * b8m_terms['1'] + eBx * b8m_terms['0'])
			delta_b86p_k += -0.5j * np.sqrt(3) * (0.5j * oney * b8p_av * (-eBy * b8p_termsH['xz'] + eBx * b8p_termsH['yz']))
			delta_b86m_k += -0.5j * oney * b8m_av * (-eBy * b8m_termsH['1'] + eBx * b8m_termsH['0'])

		hmat1 = np.zeros((params.norbitals, params.norbitals), dtype = complex)
		hmat1[2:6, 2:6] = c88_k
		hmat1[0:2, 2:6] = b68_k + delta_b68p_k + delta_b68m_k
		hmat1[2:6, 0:2] = b86_k + delta_b86p_k + delta_b86m_k

		if params.norbitals == 8:
			b7_terms = kterms['bia_b7']  # matrices are hermitian
			b67_k_p = (pp_p['bia_b7'] * -0.5j / np.sqrt(3)) * (b7_terms['xz'] * kx1 * kz_p + b7_terms['yz'] * ky * kz_p + b7_terms['zz'] * kz2_p)
			b67_k_0 = (pp_0['bia_b7'] * -0.5j / np.sqrt(3)) * onez * (b7_terms['xy'] * kxky + b7_terms['xx'] * kx2 + b7_terms['yy'] * ky2)
			b67_k_m = (pp_m['bia_b7'] * -0.5j / np.sqrt(3)) * (b7_terms['xz'] * kx1 * kz_m + b7_terms['yz'] * ky * kz_m + b7_terms['zz'] * kz2_m)
			b67_k = np.asmatrix(b67_k_p + b67_k_0 + b67_k_m)
			b76_k = -b67_k  # Note: b67_k^dagger = -b67_k

			# extra terms from out-of-plane gauge field
			delta_b67_k = -0.5j / np.sqrt(3) * (0.5j * onez * pp_0['bia_b7'] * eBz * av_y * dkx * b7_terms['xy'])
			if not ignore_magnxy:
				# extra terms from in-plane gauge field
				b7_av = 0.5 * (pp_p['bia_b7'] * av_zp + pp_m['bia_b7'] * av_zm)  # see also above
				delta_b67_k += -0.5j / np.sqrt(3) * (0.5j * oney * b7_av * (-eBy * b7_terms['xz'] + eBx * b7_terms['yz']))
			delta_b76_k = -delta_b67_k

			c87_terms = kterms['bia_c87']
			c87_termsH = kterms['bia_c87H']  # hermitian conjugate
			c78_k_p = (pp_p['bia_c'] * 0.5j * np.sqrt(3)) * oney * (c87_terms[2] * kz_p)
			c78_k_0 = (pp_0['bia_c'] * 0.5j * np.sqrt(3)) * onez * (c87_terms[0] * kx1 + c87_terms[1] * ky)
			c78_k_m = (pp_m['bia_c'] * 0.5j * np.sqrt(3)) * oney * (c87_terms[2] * kz_m)
			c87_k_p = (pp_p['bia_c'] * -0.5j * np.sqrt(3)) * oney * (c87_termsH[2] * kz_p)
			c87_k_0 = (pp_0['bia_c'] * -0.5j * np.sqrt(3)) * onez * (c87_termsH[0] * kx1 + c87_termsH[1] * ky)
			c87_k_m = (pp_m['bia_c'] * -0.5j * np.sqrt(3)) * oney * (c87_termsH[2] * kz_m)
			c78_k = np.asmatrix(c78_k_p + c78_k_0 + c78_k_m)
			c87_k = np.asmatrix(c87_k_p + c87_k_0 + c87_k_m)

			hmat1[0:2, 6:8] = b67_k + delta_b67_k
			hmat1[6:8, 0:2] = b76_k + delta_b76_k
			hmat1[6:8, 2:6] = c78_k
			hmat1[2:6, 6:8] = c87_k
		hmat = hmat1
	return hmat
+
def hstrain(z, params, kterms = None):
	"""Strain Hamiltonian block Hstrain(z)

	Arguments:
	z       Position (z index) at which the strain tensor components
	        (epsilonxx, ..., epsilonxy) and the deformation potentials
	        (as, bs, cs, ds) are evaluated.
	params  Physics parameters object.
	kterms  Transformed strain terms; required if the lattice has been
	        transformed by a matrix, otherwise unused.

	Returns:
	hmat   Numpy array of shape (norbitals, norbitals).
	"""
	pp = params.z(z)
	tr_e = pp['epsilonxx'] + pp['epsilonyy'] + pp['epsilonzz']  # shortcut: trace(epsilon)
	# Matrix elements built from the strain components and deformation potentials
	rr = -sqrt(0.75) * pp['bs'] * (pp['epsilonxx'] - pp['epsilonyy']) + 1.j * pp['ds'] * pp['epsilonxy']
	rrd = np.conjugate(rr)
	ss = -pp['ds'] * (pp['epsilonxz'] - 1.j * pp['epsilonyz'])
	ssd = np.conjugate(ss)
	tt = pp['cs'] * tr_e
	uu = pp['as'] * tr_e
	vv = 0.5 * pp['bs'] * (pp['epsilonxx'] + pp['epsilonyy'] - 2 * pp['epsilonzz'])

	if params.lattice_transformed_by_matrix():
		if kterms is None:
			raise ValueError("Transformation requires argument kterms to be defined")
		# Rebuild the matrix elements from the transformed strain terms;
		# the entries of h88 noted below are (numerically) redundant copies.
		b_terms = kterms['strain_b']
		d_terms = kterms['strain_d']
		b_sum = -pp['bs'] * np.sum([b_terms[co] * pp['epsilon' + co] for co in ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']], axis = 0)
		d_sum = -0.5 * np.sqrt(3) * pp['ds'] * np.sum([d_terms[co] * pp['epsilon' + co] for co in ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']], axis = 0)
		h88 = b_sum + d_sum
		vv = h88[0, 0]  # = -h88[1, 1]
		rr, rrd = h88[0, 2], h88[2, 0]  # = h88[1, 3], h88[3, 1]
		ss, ssd = h88[0, 1], h88[1, 0]  # = -h88[2, 3], -h88[3, 2]

	if params.norbitals == 8:
		s2 = sqrt(2.)
		s32 = sqrt(1.5)
		hmat = np.array([
			[  tt, 0.0,       0.0,       0.0,      0.0,      0.0,       0.0,       0.0],
			[ 0.0,  tt,       0.0,       0.0,      0.0,      0.0,       0.0,       0.0],
			[ 0.0, 0.0,   uu + vv,        ss,       rr,      0.0,  -ss / s2,  -s2 * rr],
			[ 0.0, 0.0,       ssd,   uu - vv,      0.0,       rr,   s2 * vv,  s32 * ss],
			[ 0.0, 0.0,       rrd,       0.0,  uu - vv,      -ss, s32 * ssd,  -s2 * vv],
			[ 0.0, 0.0,       0.0,       rrd,     -ssd,  uu + vv,  s2 * rrd, -ssd / s2],
			[ 0.0, 0.0, -ssd / s2,   s2 * vv, s32 * ss,  s2 * rr,        uu,       0.0],
			[ 0.0, 0.0, -s2 * rrd, s32 * ssd, -s2 * vv, -ss / s2,       0.0,        uu]])
	else:
		hmat = np.array([
			[  tt, 0.0,     0.0,     0.0,     0.0,     0.0],
			[ 0.0,  tt,     0.0,     0.0,     0.0,     0.0],
			[ 0.0, 0.0, uu + vv,      ss,      rr,     0.0],
			[ 0.0, 0.0,     ssd, uu - vv,     0.0,      rr],
			[ 0.0, 0.0,     rrd,     0.0, uu - vv,     -ss],
			[ 0.0, 0.0,     0.0,     rrd,    -ssd, uu + vv]])
	return hmat
+
def hzeeman(z, params, magn = None):
	"""Zeeman effect block Hzeeman(z) for perpendicular magnetic fields.

	If the field may have in-plane components (a Vector whose type is not 'z',
	or a 3-tuple), delegate to hzeemanxyz(). Otherwise only Bz enters.
	"""
	if magn is None:
		magn = 0.0
	# Delegate to the generic version when in-plane components may be present
	if isinstance(magn, Vector) and magn.vtype != 'z':
		return hzeemanxyz(z, params, magn = magn)
	if isinstance(magn, tuple) and len(magn) == 3:
		return hzeemanxyz(z, params, magn = magn)
	bz = magn.z() if isinstance(magn, Vector) else magn  # perpendicular component

	kappa = params.z(z)['kappa']
	g6 = params.z(z)['ge']  # (effective) g factor of the Gamma6 orbitals
	g8 = gg  # value is 2

	# Diagonal part for the 6-orbital model; extended below for 8 orbitals
	diag = [0.5 * g6, -0.5 * g6, -1.5 * kappa * g8, -0.5 * kappa * g8, 0.5 * kappa * g8, 1.5 * kappa * g8]
	if params.norbitals == 8:
		diag.extend([-(kappa + 0.5) * g8, (kappa + 0.5) * g8])
		hz0 = np.diag(diag)
		# Symmetric Gamma7-Gamma8 off-diagonal coupling
		offdiag = -(kappa + 1) * g8 / sqrt(2)
		for i, j in ((6, 3), (7, 4), (3, 6), (4, 7)):
			hz0[i, j] = offdiag
		return muB * bz * hz0
	return muB * bz * np.diag(diag)
+
def hzeemanxyz(z, params, magn = None):
	"""Zeeman effect block Hzeeman(z) for magnetic fields in arbitrary direction

	Arguments:
	z       Position (z index) at which kappa and ge are evaluated.
	params  Physics parameters object.
	magn    Magnetic field; Vector or 3-tuple (Bx, By, Bz); None means zero.

	Returns:
	Numpy array of shape (norbitals, norbitals).
	"""
	# Decompose the magnetic field into cartesian components
	magn = 0.0 if magn is None else magn
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
	else:
		bx, by, bz = magn
	bp = bx + 1.j * by
	bm = bx - 1.j * by
	kappa = params.z(z)['kappa']
	g6 = params.z(z)['ge']  # (effective) g factor of the Gamma6 orbitals
	g8 = gg  # value is 2
	ka8 = kappa * g8

	if params.norbitals == 8:
		# hz0: diagonal (Bz) part; hz_xy: in-plane (Bx, By) part
		hz0 = bz * np.diag([0.5 * g6, -0.5 * g6, -1.5 * kappa * g8, -0.5 * kappa * g8, 0.5 * kappa * g8, 1.5 * kappa * g8, -(kappa + 0.5) * g8, (kappa + 0.5) * g8])
		hz0[6,3] = -(kappa + 1.) * bz * g8 / sqrt(2)
		hz0[7,4] = hz0[6,3]
		hz0[3,6] = hz0[6,3]
		hz0[4,7] = hz0[6,3]
		s32ka = 0.5 * np.sqrt(3.) * kappa * g8
		s18ka = np.sqrt(1./8.) * (kappa + 1.) * g8    # note different factor: kappa + 1
		s38ka = np.sqrt(3./8.) * (kappa + 1.) * g8
		ka7 = (kappa + 0.5) * g8
		hz_xy = np.array([
			[        0, 0.5*g6*bm,        0,        0,        0,        0,        0,        0],
			[0.5*g6*bp,         0,        0,        0,        0,        0,        0,        0],
			[        0,         0,        0,-s32ka*bm,        0,        0, s38ka*bm,        0],
			[        0,         0,-s32ka*bp,        0,  -ka8*bm,        0,        0, s18ka*bm],
			[        0,         0,        0,  -ka8*bp,        0,-s32ka*bm,-s18ka*bp,        0],
			[        0,         0,        0,        0,-s32ka*bp,        0,        0,-s38ka*bp],
			[        0,         0, s38ka*bp,        0,-s18ka*bm,        0,        0,  -ka7*bm],
			[        0,         0,        0, s18ka*bp,        0,-s38ka*bm,  -ka7*bp,        0]], dtype = complex)
		return muB * (hz0 + hz_xy)
	else:
		hz0 = bz * np.diag([0.5 * g6, -0.5 * g6, -1.5 * kappa * g8, -0.5 * kappa * g8, 0.5 * kappa * g8, 1.5 * kappa * g8])
		s32ka = 0.5 * np.sqrt(3.) * kappa * g8
		hz_xy = np.array([
			[        0, 0.5*g6*bm,        0,        0,        0,        0],
			[0.5*g6*bp,         0,        0,        0,        0,        0],
			[        0,         0,        0,-s32ka*bm,        0,        0],
			[        0,         0,-s32ka*bp,        0,  -ka8*bm,        0],
			[        0,         0,        0,  -ka8*bp,        0,-s32ka*bm],
			[        0,         0,        0,        0,-s32ka*bp,        0]], dtype = complex)
		return muB * (hz0 + hz_xy)
+
def hzeemancubic(z, params, magn = None):
	"""Cubic Zeeman effect block Hzeemancubic(z) for magnetic fields in arbitrary direction

	This term is called cubic, because it involves angular momentum matrices to
	the third power, i.e., Hzc = -2 muB sum_i J_i^3 B_i (i = x,y,z).

	Arguments:
	z       Position (z index) at which the material parameter q is evaluated.
	params  Physics parameters object.
	magn    Magnetic field; Vector, 3-tuple, a real number (interpreted as
	        Bz), or None (zero field).

	Returns:
	Numpy array of shape (norbitals, norbitals).
	"""
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
	elif isinstance(magn, (int, float, np.integer, np.floating)):
		# Fix: accept any real scalar (int and numpy scalars included), not
		# only float, consistent with e.g. hz_bia(); previously an int fell
		# through to the tuple-unpacking branch and raised a TypeError.
		bx, by, bz = 0.0, 0.0, magn
	elif magn is None:
		bx, by, bz = 0.0, 0.0, 0.0
	else:
		bx, by, bz = magn
	bp = bx + 1.j * by
	bm = bx - 1.j * by
	q = params.z(z)['q']
	g8 = gg  # value is 2
	q8 = q * g8
	s = 7 * sqrt(3)  # 7 sqrt(3) = sqrt(147)

	# Only the Gamma8 block (orbitals 2..5) is nonzero
	hzc = (q8 / 8) * np.array([
		[0, 0,       0,       0,       0,        0, 0, 0],
		[0, 0,       0,       0,       0,        0, 0, 0],
		[0, 0, 27 * bz,  s * bm,       0,   6 * bp, 0, 0],
		[0, 0,  s * bp,      bz, 20 * bm,        0, 0, 0],
		[0, 0,       0, 20 * bp,     -bz,   s * bm, 0, 0],
		[0, 0,  6 * bm,       0,  s * bp, -27 * bz, 0, 0],
		[0, 0,       0,       0,       0,        0, 0, 0],
		[0, 0,       0,       0,       0,        0, 0, 0]], dtype = complex)

	norb = params.norbitals
	return muB * hzc[:norb, :norb]
+
+# Hexchange: Effect due to the exchange interaction of the Mn
+# (as function of the magnetic field in T and the temperature in K)
def hexchange(z, params, magn = None):
	"""Mn exchange block Hexchange(z) for perpendicular magnetic fields.
	This term encodes the effect of the exchange interaction of the Mn magnetic
	moments. It is field and temperature dependent.

	Arguments:
	z       Position (z index) at which the exchange parameters are evaluated.
	params  Physics parameters object; also provides the temperature.
	magn    Magnetic field; Vector, 3-tuple, or number (interpreted as Bz).

	Returns:
	Numpy array of shape (norbitals, norbitals).
	"""
	magn = 0.0 if magn is None else magn
	# Delegate to the generic version when in-plane components may be present
	if isinstance(magn, Vector) and magn.vtype != 'z':
		return hexchangexyz(z, params, magn = magn)  # with in-plane magnetic field components
	elif isinstance(magn, tuple) and len(magn) == 3:
		return hexchangexyz(z, params, magn = magn)  # with in-plane magnetic field components
	bz = magn.z() if isinstance(magn, Vector) else magn

	temperature = params.temperature
	pp = params.z(z)
	ynalpha = pp['exch_yNalpha']
	ynbeta  = pp['exch_yNbeta']
	TK0     = pp['exch_TK0']
	g       = pp['exch_g']
	# Field- and temperature-dependent prefactor
	aa     = Aexchange(bz, temperature, TK0=TK0, g=g)

	if params.norbitals == 8:
		hex0 = np.diag([3. * ynalpha, -3 * ynalpha, 3 * ynbeta, ynbeta, -ynbeta, -3 * ynbeta, -ynbeta, ynbeta])
		# Symmetric Gamma7-Gamma8 off-diagonal elements
		hex0[6,3] = -sqrt(8.) * ynbeta
		hex0[7,4] = hex0[6,3]
		hex0[3,6] = hex0[6,3]
		hex0[4,7] = hex0[6,3]
		return aa * hex0
	else:
		return aa * np.diag([3. * ynalpha, -3 * ynalpha, 3 * ynbeta, ynbeta, -ynbeta, -3 * ynbeta])
+
def hexchangexyz(z, params, magn = None):
	"""Mn exchange block Hexchange(z) for magnetic fields in arbitrary direction.
	This term encodes the effect of the exchange interaction of the Mn magnetic
	moments. It is field and temperature dependent.

	Arguments:
	z       Position (z index) at which the exchange parameters are evaluated.
	params  Physics parameters object; also provides the temperature.
	magn    Magnetic field; Vector or 3-tuple (Bx, By, Bz); None means zero.

	Returns:
	Numpy array of shape (norbitals, norbitals).
	"""
	magn = 0.0 if magn is None else magn
	if isinstance(magn, Vector):
		bx, by, bz = magn.xyz()
	else:
		bx, by, bz = magn
	bb = np.sqrt(bx**2 + by**2 + bz**2)
	if bb == 0.0:
		# Zero field: no exchange contribution
		return np.zeros((params.norbitals, params.norbitals), dtype = complex)
	temperature = params.temperature
	pp = params.z(z)

	ynalpha = pp['exch_yNalpha']
	ynbeta  = pp['exch_yNbeta']
	TK0     = pp['exch_TK0']
	g       = pp['exch_g']
	# Field- and temperature-dependent prefactors per cartesian component
	ax, ay, az = Aexchange((bx, by, bz), temperature, TK0=TK0, g=g)

	# Spin matrices restricted to the Gamma6 (orbitals 0-1) and the
	# Gamma8/Gamma7 (orbitals 2-7) subspaces, respectively
	hex6x = spin.restrictmat(spin.sxmat, [0, 1]) * ax * 6 * ynalpha
	hex6y = spin.restrictmat(spin.symat, [0, 1]) * ay * 6 * ynalpha
	hex6z = spin.restrictmat(spin.szmat, [0, 1]) * az * 6 * ynalpha
	hex87x = spin.restrictmat(spin.sxmat, [2, 3, 4, 5, 6, 7]) * ax * 6 * ynbeta
	hex87y = spin.restrictmat(spin.symat, [2, 3, 4, 5, 6, 7]) * ay * 6 * ynbeta
	hex87z = spin.restrictmat(spin.szmat, [2, 3, 4, 5, 6, 7]) * az * 6 * ynbeta
	hexmat = hex6x + hex6y + hex6z + hex87x + hex87y + hex87z
	return hexmat[:params.norbitals, :params.norbitals]
+
+
def hconfinement_y(y, params):
	"""Confinement Hamiltonian: put a large potential on the two boundary rows
	in the y direction. The result should be multiplied by a large number
	(typically 1e4 to 1e5, i.e., a few 10-100 eV) in order to have any effect."""
	norb = params.norbitals
	if y == 0 or y == params.ny - 1:
		return np.identity(norb)
	return np.zeros((norb, norb))
+
def hsplit(z, params):
	"""Hsplit: artificial degeneracy-lifting term, the diagonal matrix sgn(Jz)."""
	signs8 = [1., -1., 1., 1., -1., -1., 1., -1.]
	if params.norbitals == 8:
		return np.diag(signs8)
	return np.diag(signs8[:6])
+
def hsplit_zero(z, params, k = None, zero_acc = 1e-8):
	"""Hsplitzero: Artificially lift the degeneracies using the matrix sgn(Jz) at k = 0 only.

	Arguments:
	z         IGNORED
	params    Physical-parameters instance; only norbitals is used.
	k         None, number, Vector, list, or tuple. The momentum; None is
	          treated as zero momentum.
	zero_acc  Float. Threshold below which |k| counts as zero.

	Returns:
	The matrix sgn(Jz), of shape (norbitals, norbitals), if |k| < zero_acc;
	otherwise the zero matrix of the same shape.

	Raises:
	TypeError if k is of an unsupported type (e.g., an empty list).
	"""
	mat = np.diag([1., -1., 1., 1., -1., -1., 1., -1.]) if params.norbitals == 8 else np.diag([1., -1., 1., 1., -1., -1.])
	if k is None:
		return mat
	elif isinstance(k, (int, float, np.integer, np.floating)):
		return mat if abs(k) < zero_acc else 0 * mat
	elif isinstance(k, Vector):
		k1 = k.xyz()
		normk = np.sqrt(k1[0]**2 + k1[1]**2 + k1[2]**2)
	elif isinstance(k, (list, tuple)) and len(k) > 0:
		# Also accept tuples; previously only lists were recognized here.
		k1 = np.array(k)
		normk = np.sqrt(np.sum(k1**2))
	else:
		raise TypeError
	return mat if normk < zero_acc else 0 * mat
+
def hsplit_bia(z, params, k = None, zero_acc = 1e-8):
	"""Hsplitbia: A degeneracy lifting matrix that behaves better than Hsplit if BIA terms are present.

	Arguments:
	z         IGNORED
	params    Physical-parameters instance; norbitals and, if applicable, the
	          lattice transformation are used.
	k         None, number, Vector, or list. The momentum; None is treated as
	          zero momentum.
	zero_acc  Float. Threshold for testing momentum components against zero.

	Returns:
	A diagonal sign matrix of shape (norbitals, norbitals) if any momentum
	component is (close to) zero, otherwise the zero matrix of the same shape.

	Raises:
	TypeError if k is of an unsupported type (e.g., an empty list or a tuple).
	"""
	# Sign pattern differs from hsplit(); presumably chosen to be compatible
	# with the BIA terms -- confirm against the BIA Hamiltonian blocks.
	mat = np.diag([1., -1., -1., 1., -1., 1., 1., -1.]) if params.norbitals == 8 else np.diag([1., -1., -1., 1., -1., 1.])
	if k is None:
		return mat
	elif isinstance(k, (int, float, np.integer, np.floating)):
		return mat if abs(k) < zero_acc else 0 * mat
	elif isinstance(k, Vector):
		k1 = k.xyz()
	elif isinstance(k, list) and len(k) > 0:
		k1 = np.array(k)
	else:
		raise TypeError
	if params.lattice_transformed_by_matrix():
		# Transform the momentum with the lattice transformation before testing
		# its components; pad to three components first.
		if len(k1) < 3:
			k1 = np.concatenate((k1, [0.0] * (3 - len(k1))))
		k1 = np.dot(params.lattice_trans.T, k1)
	# Note: the criterion is on the smallest |component|, not on |k| itself.
	return mat if np.amin(np.abs(k1)) < zero_acc else 0 * mat
+
def hsplit_helical(z, params, k = None, lattice_reg = False, cross = False, zerosplit = False, zero_acc = 1e-8):
	"""Artificially lift the degeneracies depending on momentum.

	This may be using a spin matrix 'parallel' to the momentum
	  H = k.Spin / |k|
	or 'perpendicular'
	  H = (kx sy - ky sx) / |k|
	depending on the value of argument cross.

	Arguments:
	z            IGNORED
	params       SysParams instance.
	k            None, numeric, or list of length 3. Zero momentum (None), kx
	             (numeric) or [kx, ky, kz] (list).
	lattice_reg  True or False. Whether to apply lattice regularization to the
	             momentum.
	cross        True or False. If False, use the 'parallel' form, see above. If
	             True, use the 'perpendicular' form.
	zerosplit    True or False. If False, the splitting at zero momentum is
	             identically zero. If True, use splitting of form sgn(Jz) at
	             zero momentum.
	zero_acc     Float. Threshold value for testing k being zero.

	Returns:
	Matrix of dimension (norb, norb), where norb is the number of orbitals.
	"""
	# Normalize the momentum argument to a list [kx, ky, kz].
	if isinstance(k, (int, float, np.integer, np.floating)):
		k = [k, 0.0, 0.0]
	elif k is None:
		k = [0.0, 0.0, 0.0]
	if not (isinstance(k, list) and len(k) == 3):
		raise TypeError
	if lattice_reg:
		# Lattice regularization: k -> sin(a k) / a, component-wise.
		cc = params.a_lattice
		k = [sin(cc * ki) / cc for ki in k]
	normk = np.sqrt(k[0]**2 + k[1]**2 + k[2]**2)
	norb = params.norbitals
	if normk < zero_acc:
		# At zero momentum the helical forms are undefined (division by |k|);
		# fall back to sgn(Jz) splitting or zero, depending on zerosplit.
		return hsplit(0, params) if zerosplit else np.zeros((norb, norb), dtype = float)
	elif cross:
		# NOTE(review): this computes ky sx - kx sy, i.e., the negative of the
		# docstring's (kx sy - ky sx); sign convention to confirm.
		k_cross_s = k[1] * spin.sxmat[:norb, :norb] - k[0] * spin.symat[:norb, :norb]
		return k_cross_s / normk
	else:
		# 'Parallel' form k . Spin / |k|.
		k_dot_s = k[0] * spin.sxmat[:norb, :norb] + k[1] * spin.symat[:norb, :norb] + k[2] * spin.szmat[:norb, :norb]
		return k_dot_s / normk
+
def hrashba(k, params, alpha, lattice_reg = False):
	"""'Artificial' Rashba Hamiltonian block Hrashba(kx, ky, kz).
	This Hamiltonian provides H = alpha . (k × Spin), where . and × denote
	inner and cross product respectively. This term models the Rashba effect
	generated by a background potential (electric field). NOTE: This term and
	the effect of a potential are NOT identical!

	Arguments:
	k            Momentum; a number (interpreted as kx) or a list of up to
	             three components [kx, ky, kz] (missing components default
	             to 0).
	params       SysParams instance. Here, we use the lattice constant and the
	             number of orbitals.
	alpha        Defines the vector of coefficients (alphax, alphay, alphaz).
	             The argument may be a Vector, 3-tuple, or a number. In the
	             latter case, alphax = alphay = 0.
	lattice_reg  Whether to apply lattice regularization (k -> sin(a k) / a)
	             to the momentum k.

	Returns:
	6x6 or 8x8 matrix Hrashba.

	Raises:
	TypeError if alpha or k is of an unsupported type. Note that for alpha,
	only tuples are accepted, not lists.
	"""
	if isinstance(alpha, Vector):
		ax, ay, az = alpha.xyz()
	elif isinstance(alpha, tuple) and len(alpha) == 3:
		ax, ay, az = alpha
	elif isinstance(alpha, (float, int, np.floating, np.integer)):
		ax, ay, az = 0.0, 0.0, alpha
	else:
		raise TypeError("Argument 'alpha' must be a Vector instance, a 3-tuple, or a single number")
	if isinstance(k, (int, float, np.integer, np.floating)):
		k = [k, 0.0, 0.0]
	elif isinstance(k, list) and len(k) <= 3:
		# Pad missing momentum components with zeros.
		while len(k) < 3:
			k = k + [0.0]
	else:
		raise TypeError
	if lattice_reg:
		cc = params.a_lattice
		k = [sin(cc * ki) / cc for ki in k]
	norb = params.norbitals
	# Each alpha component contributes the corresponding component of
	# (k × Spin); the x and y contributions are skipped when their
	# coefficient is zero.
	hr = az * (k[0] * spin.symat[:norb, :norb] - k[1] * spin.sxmat[:norb, :norb])  # HRz
	if ax != 0.0:
		hr += ax * (k[1] * spin.szmat[:norb, :norb] - k[2] * spin.symat[:norb, :norb])  # HRx
	if ay != 0.0:
		hr += ay * (k[2] * spin.sxmat[:norb, :norb] - k[0] * spin.szmat[:norb, :norb])  # HRy
	return hr
+
def h_pot_1d(pot, params, axis = 'z'):
	"""Potential for 1d Hamiltonians (auxiliary function).
	This function takes a potential either in z or y direction and expands it
	over the other coordinate.

	Arguments:
	pot     Numpy array or array-like. The potential along the specified axis.
	        The array may be a one-dimensional array of length nz * ny, nz or
	        ny, or a two-dimensional array of shape (nz, ny), (nz, no) or
	        (ny, no), where no is >= the number of orbitals.
	params  PhysParams instance
	axis    'zy', 'z' or 'y'. Whether the argument pot models V(z, y), V(z), or
	        V(y).

	Returns:
	A square matrix of dimensions norb * nz * ny.
	"""
	if pot is None:
		# No potential: a scalar zero is compatible with matrix addition.
		return 0.0
	pot = np.asarray(pot)
	nz = params.nz
	ny = params.ny
	norb = params.norbitals
	if pot.shape == (nz * ny,):
		# Flattened V(z, y); reorder to (y, z) and expand over orbitals.
		# NOTE(review): the axis argument is ignored in this branch.
		# TODO: How can we make sure that the coordinates are in the right order? Assume it's (z, y).
		pot1 = np.reshape(pot, (nz, ny)).transpose().flatten()
		diag = np.repeat(pot1, norb)
	elif (axis == 'zy' or axis == 'y') and pot.shape == (nz, ny):
		# Note: In the Hamiltonian matrix, the order of coordinates is (y, z, orb), so we need to transpose
		diag = np.repeat(pot.transpose().flatten(), norb)
	elif axis == 'z' and pot.shape[0] == nz:
		if pot.ndim == 1:
			# V(z): repeat over orbitals, then tile over y.
			diag = np.tile(np.repeat(pot, norb), ny)
		elif pot.ndim == 2 and pot.shape[1] >= norb:
			# Orbital-resolved V(z, orb): keep only the first norb columns.
			diag = np.tile(pot[:, :norb].flatten(), ny)
		else:
			raise ValueError("Potential array has invalid shape")
	elif axis == 'y' and pot.shape[0] == ny:
		if pot.ndim == 1:
			# V(y): constant over z and orbitals.
			diag = np.repeat(pot, norb * nz)
		elif pot.ndim == 2 and pot.shape[1] >= norb:
			# Orbital-resolved V(y, orb): broadcast over the z axis.
			diag_full = np.zeros((ny, nz, norb), dtype = float) + pot[:, np.newaxis, :norb]
			diag = diag_full.flatten()
		else:
			raise ValueError("Potential array has invalid shape")
	else:
		raise ValueError("Potential array has invalid size, or invalid axis given")

	loc = 0
	# Build a diagonal sparse matrix (offset 0) carrying the potential.
	return dia_matrix((np.array([diag,]), loc), shape = (norb * ny * nz, norb * ny * nz)).tocsc()
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/full.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/full.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad9d45518e52cdb01fee54089dd53e1a11bc1baa
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/full.py
@@ -0,0 +1,222 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from . import blocks as hb
+
+
def hz(z, dz, k, b, params, lattice_reg = False, ignorestrain = False, axial = True, bia = False, ignore_magnxy = False, kterms = None):
	"""Full Hamiltonian H(kx, ky, z).

	Composes the block matrices h0z and h1z, optionally the BIA terms, and, for
	the diagonal block (dz == 0), the strain, Zeeman, and exchange terms.

	Arguments:
	z, dz          Z coordinate (index) and offset; dz == 0 selects the
	               diagonal block, to which the z-local terms are added.
	k              Momentum (kx, ky).
	b              Magnetic field; passed as magn to the block functions.
	params         Physical-parameters instance.
	lattice_reg    Whether to apply lattice regularization.
	ignorestrain   If True, omit the strain Hamiltonian.
	axial          Whether to use the axial approximation.
	bia            If True, include the bulk-inversion-asymmetry terms.
	ignore_magnxy  Passed through; see the block functions for its meaning.
	kterms         Precomputed k terms, passed through to the block functions.

	Returns:
	Matrix for this (z, z + dz) block.
	"""
	hh = hb.h0z(z, dz, k, params, kterms = kterms) + hb.h1z(z, dz, k, params, lattice_reg, axial = axial, magn = b, ignore_magnxy = ignore_magnxy, kterms = kterms)
	if bia:
		hh += hb.hz_bia(z, dz, k, params, lattice_reg, ignore_magnxy = ignore_magnxy, kterms = kterms, magn = b)
	if dz == 0:
		# z-local terms are added to the diagonal block only.
		if not ignorestrain:
			hh += hb.hstrain(z, params, kterms = kterms)
		hh += hb.hzeeman(z, params, magn = b)
		# hh += hb.hzeemancubic(z, params, magn = b)  ## TODO
		if params.has_exchange:
			hh += hb.hexchange(z, params, magn = b)
		# hh += hb.hrashba(k, params, (14.0,14.0,0.0), lattice_reg)  ## TODO
	return hh
+
def hbulk(k, b, params, lattice_reg = False, ignorestrain = False, axial = True, bia = False, kterms = None):
	"""Full Hamiltonian H(kx, ky, kz).

	Composes the bulk block matrices h0bulk and h1bulk with the Zeeman term,
	and optionally strain, exchange, and BIA terms.

	Arguments:
	k             Momentum (kx, ky, kz).
	b             Magnetic field; passed as magn to the block functions.
	params        Physical-parameters instance.
	lattice_reg   Whether to apply lattice regularization.
	ignorestrain  If True, omit the strain Hamiltonian.
	axial         Whether to use the axial approximation.
	bia           If True, include the bulk-inversion-asymmetry terms.
	kterms        Precomputed k terms, passed through to the block functions.

	Returns:
	Matrix of the full bulk Hamiltonian.
	"""
	# Bulk: pass z = None to the z-dependent block functions (presumably
	# selecting the bulk material parameters; confirm in the block functions).
	z = None
	hh = hb.h0bulk(k, params, lattice_reg, kterms = kterms) + hb.h1bulk(k, params, lattice_reg, axial = axial, kterms = kterms) \
		+ hb.hzeeman(z, params, magn = b)
	if not ignorestrain:
		hh += hb.hstrain(z, params, kterms = kterms)
	if params.has_exchange:
		hh += hb.hexchange(z, params, magn = b)
	if bia:
		hh += hb.hbia_bulk(k, params, lattice_reg, kterms = kterms)
	return hh
+
def hz_ll(z, dz, b, n, params, lattice_reg = False, split = 0.0, ignorestrain = False, axial = True):
	"""Full LL Hamiltonian H_n(kx, ky, z), where n is the LL index.

	Arguments:
	z, dz         Z coordinate (index) and offset; z-local terms are added
	              for dz == 0 only.
	b             Magnetic field; passed as magn to the block functions.
	n             Integer >= -2. Landau level index; determines which orbitals
	              are kept in the returned block.
	params        Physical-parameters instance.
	lattice_reg   Whether to apply lattice regularization.
	split         Float. If nonzero, add split * sgn(Jz) as artificial
	              degeneracy-lifting term.
	ignorestrain  If True, omit the strain Hamiltonian.
	axial         Whether to use the axial approximation.

	Returns:
	Matrix restricted to the orbitals present in Landau level n.
	"""
	if n < -2:
		# NOTE(review): this only prints an error; execution continues with
		# the invalid index.
		sys.stderr.write("ERROR (Hz_LL): Landau level index n must be >= -2.\n")

	hh = hb.h0z(z, dz, [0, 0], params) + hb.h1z_ll(z, dz, n, params, lattice_reg, axial = axial, magn = b)
	if dz == 0:
		if split != 0.0:
			hh += split * hb.hsplit(z, params)
		if not ignorestrain:
			hh += hb.hstrain(z, params)
		hh += hb.hzeeman(z, params, magn = b)
		if params.has_exchange:
			hh += hb.hexchange(z, params, magn = b)

	# Select the orbitals that exist in Landau level n; lower levels contain
	# fewer orbitals.
	if params.norbitals == 8:
		whichbands = [5,] if n == -2 else [1,4,5,7] if n == -1 else [0,1,3,4,5,6,7] if n == 0 else [0,1,2,3,4,5,6,7]
	else:
		whichbands = [5,] if n == -2 else [1,4,5] if n == -1 else [0,1,3,4,5] if n == 0 else [0,1,2,3,4,5]
	xx, yy = np.meshgrid(whichbands, whichbands)

	return hh[xx.T, yy.T]
+
def hbulk_ll(k, b, n, params, lattice_reg = False, ignorestrain = False, axial = True, bia = False):
	"""Full bulk LL Hamiltonian H_n(kx, ky, kz), where n is the LL index.

	Arguments:
	k             Momentum.
	b             Magnetic field; passed as magn to the block functions.
	n             Integer >= -2. Landau level index; determines which orbitals
	              are kept in the returned block.
	params        Physical-parameters instance.
	lattice_reg   Whether to apply lattice regularization.
	ignorestrain  If True, omit the strain Hamiltonian.
	axial         Whether to use the axial approximation.
	bia           Must be False; BIA terms are not implemented here.

	Returns:
	Matrix restricted to the orbitals present in Landau level n.

	Raises:
	NotImplementedError if bia is True.
	"""
	if n < -2:
		# NOTE: only prints an error; execution continues with the invalid
		# index. Message previously mislabeled the function as "Hz_LL".
		sys.stderr.write("ERROR (hbulk_ll): Landau level index n must be >= -2.\n")

	z = None
	hh = hb.h0bulk(k, params, lattice_reg) + hb.h1bulk_ll(k, n, params, lattice_reg, axial = axial, magn = b) \
		+ hb.hzeeman(z, params, magn = b)
	if not ignorestrain:
		hh += hb.hstrain(z, params)
	if params.has_exchange:
		hh += hb.hexchange(z, params, magn = b)
	if bia:
		raise NotImplementedError("BIA terms not supported for hbulk_ll")

	# Select the orbitals that exist in Landau level n; lower levels contain
	# fewer orbitals.
	if params.norbitals == 8:
		whichbands = [5,] if n == -2 else [1,4,5,7] if n == -1 else [0,1,3,4,5,6,7] if n == 0 else [0,1,2,3,4,5,6,7]
	else:
		whichbands = [5,] if n == -2 else [1,4,5] if n == -1 else [0,1,3,4,5] if n == 0 else [0,1,2,3,4,5]
	xx, yy = np.meshgrid(whichbands, whichbands)

	return hh[xx.T, yy.T]
+
def hzy(z, dz, y, dy, kx, params, boundary = 0, lattice_reg = False, ignorestrain = False, axial = True, bia = False, kterms = None):
	"""Full Hamiltonian H(kx, y, z), without magnetic field.

	Arguments:
	z, dz         Z coordinate (index) and offset.
	y, dy         Y coordinate (index) and offset. The z- and y-local terms
	              (strain, Zeeman, exchange, y confinement) are added only to
	              the fully diagonal block dz == 0, dy == 0.
	kx            Momentum along x.
	params        Physical-parameters instance.
	boundary      -1, 0, or 1. Position relative to the y edges; passed to the
	              block functions.
	lattice_reg   Whether to apply lattice regularization.
	ignorestrain  If True, omit the strain Hamiltonian.
	axial         Whether to use the axial approximation.
	bia           If True, include the bulk-inversion-asymmetry terms (with
	              magn = 0.0 in this field-free version).
	kterms        Precomputed k terms, passed through to the block functions.

	Returns:
	Matrix for this (z, z + dz; y, y + dy) block.
	"""
	# return hzytest(z, dz, y, dy, kx, params, boundary)

	hh = hb.h0zy(z, dz, y, dy, kx, params, kterms = kterms) + hb.h1zy(z, dz, y, dy, kx, params, boundary, lattice_reg, axial = axial, kterms = kterms)
	if bia:
		hh += hb.hzy_bia(z, dz, y, dy, kx, params, boundary = boundary, lattice_reg = lattice_reg, magn = 0.0, kterms = kterms)
	if dz == 0 and dy == 0:
		if not ignorestrain:
			hh += hb.hstrain(z, params, kterms = kterms)
		hh += hb.hzeeman(z, params)
		if params.has_exchange:
			hh += hb.hexchange(z, params)
		hh += params.yconfinement * hb.hconfinement_y(y, params)

	return hh
+
+
+"""
+# full H as function of (kx, y, z)
+def hzytest(z, dz, y, dy, k, params, boundary = 0, lattice_reg = False, ignorestrain = False, ignoreexchange = False):
+	magn = params.magn.z() if isinstance(params.magn, Vector) else params.magn[2] if isinstance(params.magn, tuple) and len(params.magn) == 3 else params.magn
+
+	if isinstance(k, list):
+		kx0 = k[0]
+	else:
+		kx0 = k
+
+	# Peierls substitution:
+	# kx -> kx + eAx with eAx = -(e B / hbar) * b * y
+	# The lattice constant yres is included, because y is just an index
+	y0 = params.ymid * (1.0 + 0)
+	eB = eoverhbar * magn
+	eAx = -eoverhbar * magn * params.yres * (y - y0)
+	# if z in [10, 120]:
+	# 	print (y, y0, eB, eB * params.yres, eAx)
+	if lattice_reg:
+		cc = params.aLattice
+		kx = np.sin(cc * (kx0 + eAx)) / cc
+		kx2 = (1. - np.cos(cc * (kx0 + eAx))) * 2. / cc**2
+		kx02 = (1. - np.cos(cc * kx0)) * 2. / cc**2
+		dkx = np.cos(cc * (kx0 + eAx))
+	else:
+		kx = kx0 + eAx
+		kx2 = kx**2
+		kx02 = kx0**2
+		dkx = 1.
+
+	# Momenta
+	onez  = (1 if dz == 0 else 0)  # for diagonal terms
+	oney  = (1 if dy == 0 else 0)  # for diagonal terms
+	ddy   =  1 if dy == 1 else -1 if dy == -1 else 0  # first
+	av_y = 0.5 if dy == 1 or dy == -1 else 0  # for use in kp2, km2
+	if boundary == 0:     # not at an edge
+		d2dy2 = -2 if dy == 0 else  1 if (dy == 1 or dy == -1) else 0
+	elif boundary ==  1 or boundary == -1:   # at upper/lower edge
+		d2dy2 = -1 if dy == 0 else  1 if (dy == 1 or dy == -1) else 0
+	else:
+		sys.stderr.write("ERROR (h1zy_magn): Boundary number should be -1,0,1\n")
+		exit(1)
+	# print ("(%2i, %2i): %2i" % (y, y+dy, d2dy2))
+
+	kz  = params.c_dz  * ( 1 if dz == 1 else -1 if dz == -1 else 0)
+	#kz2 = params.c_dz2 * (-2 if dz == 0 else 1 if (dz == 1 or dz == -1) else 0)
+	kp  = oney * kx + 1.j * params.c_dy * ddy
+	km  = oney * kx - 1.j * params.c_dy * ddy
+	k2  = oney * kx2 + params.c_dy2 * d2dy2
+
+	# return 1.j * ddy * np.identity(6)
+	# return params.c_dy * ddy * np.identity(6)
+	# return d2dy2 * np.identity(6)
+	# return np.diag([d2dy2, boundary, y, dy , 1.j*dy,0.])
+	return av_y * np.identity(params.norbitals)
+
+	return np.array([\
+	[            y,           1.j * params.c_dy * ddy,  kp,       0.0, km,           0.0 ],\
+	[ -1.j * params.c_dy * ddy,          1.j *  dy,            0.0,kp,      0.0,  km ],\
+	[ km,           0.0,               z,  0,     0,           0.0 ],\
+	[           0.0,     km,           0,      1.j *  dz,       0,            0 ],\
+	[ kp,           0.0,            0,       0,      0,          0 ],\
+	[           0.0,  kp,            0.0,       0,       0,           0 ]])
+	"""
+
def hzy_magn(z, dz, y, dy, kx, b, params, boundary = 0, lattice_reg = False, ignorestrain = False, gauge_zero = 0.0, axial = True, bia = False, ignore_magnxy = False, kterms = None):
	"""Full Hamiltonian H(kx, y, z), with magnetic field.

	For b == 0.0, delegates to the field-free hzy(). Otherwise composes h0zy
	and the magnetic h1zy_magn, optionally BIA terms, and, for the fully
	diagonal block (dz == 0, dy == 0), strain, Zeeman, exchange, and y
	confinement.

	Arguments:
	z, dz          Z coordinate (index) and offset.
	y, dy          Y coordinate (index) and offset.
	kx             Momentum along x.
	b              Magnetic field; passed as magn to the block functions.
	               NOTE(review): the test b == 0.0 assumes a scalar; behavior
	               for Vector/tuple b depends on their equality semantics.
	params         Physical-parameters instance.
	boundary       -1, 0, or 1. Position relative to the y edges.
	lattice_reg    Whether to apply lattice regularization.
	ignorestrain   If True, omit the strain Hamiltonian.
	gauge_zero     Passed to the magnetic block functions (gauge choice).
	axial          Whether to use the axial approximation.
	bia            If True, include the bulk-inversion-asymmetry terms.
	ignore_magnxy  Passed through; see the block functions for its meaning.
	kterms         Precomputed k terms, passed through to the block functions.

	Returns:
	Matrix for this (z, z + dz; y, y + dy) block.
	"""
	# return hzytest(z, dz, y, dy, kx, params, boundary)
	if b == 0.0:
		return hzy(z, dz, y, dy, kx, params, boundary = boundary, lattice_reg = lattice_reg, ignorestrain = ignorestrain, axial = axial, bia = bia, kterms = kterms)

	hh = hb.h0zy(z, dz, y, dy, kx, params, kterms = kterms) + hb.h1zy_magn(z, dz, y, dy, kx, params, boundary, lattice_reg, gauge_zero, axial = axial, magn = b, ignore_magnxy = ignore_magnxy, kterms = kterms)
	if bia:
		hh += hb.hzy_bia(z, dz, y, dy, kx, params, boundary = boundary, lattice_reg = lattice_reg, gauge_zero = gauge_zero, magn = b, ignore_magnxy = ignore_magnxy, kterms = kterms)
	if dz == 0 and dy == 0:
		if not ignorestrain:
			hh += hb.hstrain(z, params, kterms = kterms)
		hh += hb.hzeeman(z, params, magn = b)
		if params.has_exchange:
			hh += hb.hexchange(z, params, magn = b)
		hh += params.yconfinement * hb.hconfinement_y(y, params)

	return hh
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/hamiltonian.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/hamiltonian.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c831d89c067fd8c0128977a4d5837a2149a169c
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/hamiltonian.py
@@ -0,0 +1,806 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import math
+from scipy.sparse import csc_matrix, coo_matrix, dia_matrix, issparse
+import sys
+
+from ..momentum import Vector
+from ..lltools import delta_n_ll
+from .full import hz, hz_ll, hzy, hzy_magn, hbulk
+from .blocks import hsplit, hsplit_zero, hsplit_bia, hsplit_helical
+from .parity import parity_z
+from ..parallel import parallel_apply
+
+# For debugging, not for normal usage:
+# from hamiltonian.tools import herm_check, ham_write
+
+### TOOLS FOR SPARSE FULL MATRICES ###
+
+# All neighbour pairs
def neighbourpairsz(nz):
	"""Return the list [z, z + dz, dz] for all z in 0..nz-1 and dz in {0, 1},
	keeping only pairs where z + dz stays inside the lattice (z + dz < nz)."""
	return [[zi, zi + dzi, dzi] for zi in range(nz) for dzi in (0, 1) if zi + dzi < nz]
+
def sparse_eliminate_zeros(mat, acc = 1e-10):
	"""Drop entries with magnitude below acc from a sparse matrix, in place.

	Arguments:
	mat  SciPy sparse matrix exposing a .data attribute.
	acc  Float. Magnitude threshold below which stored entries are discarded.

	Returns:
	The same matrix object, with near-zero entries removed from storage.
	"""
	small = np.abs(mat.data) < acc
	mat.data[small] = 0
	mat.eliminate_zeros()
	return mat
+
def h_constructor(hamtype, arg1, arg2, params, periodicy = False, solver = None, **kwds):
	"""Parallel constructor for sparse Hamiltonians

	Arguments:
	hamtype      hz_sparse_worker, hz_sparse_ll_worker, hzy_sparse_worker, or
	             hzy_sparse_magn_worker. Worker function for Hamiltonian
	arg1, arg2   Numerical value for momentum (k), magnetic field (b; not
	             needed for all matrix types), or Landau level index (n),
	params       PhysParams instance. Contains information about discretization.
	periodicy    Boolean value. Sets boundary value for BIA in 1d cases
	solver       DiagSolver instance. Used to determine parallelization strategy
	kwds         More options passed through to basic matrix construction
	             routines.

	Returns:
	A SciPy sparse CSC matrix.
	"""
	nz = params.nz
	norb = params.norbitals
	# Row/column index pattern within a single (norb x norb) block; shared by
	# all workers.
	indices0 = np.indices((norb, norb))
	rows0 = indices0[0].flatten()
	cols0 = indices0[1].flatten()
	nbpairs = neighbourpairsz(nz)
	if solver is not None and solver.num_processes == 1:
		num_workers = solver.num_threads  # only use parallel workers if there is not already a pool
		# The following parallelizes matrix construction and takes just about 10% more time if done on just a single worker,
		# compared to the old code that constructed the complete matrix in one run. As this action is CPU speed bound and not
		# limited by I/O processes, we need to use worker processes instead of threads. Threads that execute pure python code
		# are limited by python's Global Interpreter Lock.
		matrixlist = parallel_apply(hamtype, nbpairs, (arg1, arg2, params, periodicy, rows0, cols0,),
		                            f_kwds=kwds, threads=False, num_processes=num_workers, showstatus=False)
	else:
		num_workers = 1  # uses a for loop instead (without the overhead of parallel_apply)
		matrixlist = [hamtype(nbpair, arg1, arg2, params, periodicy, rows0, cols0, **kwds) for nbpair in nbpairs]

	# If the parallel_apply ends prematurely, for example from a
	# KeyboardInterrupt or some other signal, raise an error. Otherwise, the
	# function would have returned a partially constructed matrix to the
	# subsequent diagonalization step.
	if len(matrixlist) != len(nbpairs):
		raise ValueError("Parallel matrix construction returned invalid number of results")

	if num_workers == 1:
		return sum(matrixlist).tocsc()
	# Summing of constructed coo matrices can also be parallelized:
	# split the list into num_workers chunks, sum each chunk in parallel,
	# then sum the partial results.
	n = int(math.ceil(len(matrixlist) / num_workers))
	matrixlist = [matrixlist[i:i + n] for i in range(0, len(matrixlist), n)]
	matrixlist = parallel_apply(sum, matrixlist, num_processes=num_workers, showstatus=False, threads=False)

	return sum(matrixlist).tocsc()
+
+### SPARSE FULL MATRICES ###
def hz_sparse_worker(p, k, b, params, periodicy, rows0, cols0, **kwds):
	"""Sparse matrix constructor for Hamiltonian H(kx, ky, z) ('2D').

	Arguments:
	p          List [z, z + dz, dz] identifying the pair of z layers handled
	           by this worker (see neighbourpairsz()).
	k          Momentum (kx, ky); passed to hz().
	b          Magnetic field; passed to hz().
	params     Physical-parameters instance.
	periodicy  IGNORED in this worker.
	rows0      Row indices within one (norb x norb) block, flattened.
	cols0      Column indices within one (norb x norb) block, flattened.
	kwds       Passed through to hz().

	Returns:
	SciPy COO matrix of shape (norb * nz, norb * nz) containing the
	contribution of this pair of z layers.
	"""
	nz = params.nz
	norb = params.norbitals
	allrows = []
	allcols = []
	allvals = []

	# diagonal block
	if p[2] == 0:
		m = hz(p[0], 0, k, b, params, **kwds)
		rows = p[0] * norb + rows0
		cols = p[0] * norb + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(m.flatten())
	# off diagonal blocks
	elif p[2] == 1:
		# Hermitian symmetrization: average the (z+1, z) block with the
		# conjugate transpose of the (z, z+1) block; mp is its adjoint.
		mm = 0.5 * (hz(p[1], -1, k, b,  params, **kwds) + hz(p[0], 1, k, b, params, **kwds).conjugate().transpose())
		mp = mm.conjugate().transpose()
		# herm_check(hz(p[1], -1, k, params, **kwds), hz(p[0], 1, k, params, **kwds), p[0])
		rows = p[0] * norb + rows0
		cols = p[1] * norb + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(mp.flatten())
		rows = p[1] * norb + rows0
		cols = p[0] * norb + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(mm.flatten())

	# Drop exact zeros before building the sparse matrix.
	non0 = (np.array(allvals).flatten() != 0)
	s = coo_matrix((np.array(allvals).flatten()[non0], (np.array(allrows).flatten()[non0], np.array(allcols).flatten()[non0])), shape = (norb * nz, norb * nz), dtype = complex)

	return s
+
def hz_sparse_ll_worker(p, b, n, params, periodicy, rows0, cols0, **kwds):
	"""Sparse matrix constructor for LL Hamiltonian H_n(kx, ky, z).

	Arguments:
	p          List [z, z + dz, dz] identifying the pair of z layers handled
	           by this worker (see neighbourpairsz()).
	b          Magnetic field; passed to hz_ll().
	n          Landau level index; determines the block size nbands.
	params     Physical-parameters instance.
	periodicy  IGNORED in this worker.
	rows0      IGNORED; recomputed below because the LL block size (nbands)
	           differs from norb.
	cols0      IGNORED; recomputed below for the same reason.
	kwds       Passed through to hz_ll().

	Returns:
	SciPy COO matrix of shape (nbands * nz, nbands * nz) containing the
	contribution of this pair of z layers.
	"""
	nz = params.nz
	# Number of orbitals present in Landau level n (fewer for n < 1).
	if params.norbitals == 8:
		nbands = 1 if n == -2 else 4 if n == -1 else 7 if n == 0 else 8
	else:
		nbands = 1 if n == -2 else 3 if n == -1 else 5 if n == 0 else 6
	indices0 = np.indices((nbands, nbands))
	rows0 = indices0[0].flatten()
	cols0 = indices0[1].flatten()
	allrows = []
	allcols = []
	allvals = []

	# diagonal block
	if p[2] == 0:
		m = hz_ll(p[0], 0, b, n, params, **kwds)
		rows = p[0] * nbands + rows0
		cols = p[0] * nbands + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(m.flatten())
	# off diagonal blocks
	elif p[2] == 1:
		# Hermitian symmetrization: average the (z+1, z) block with the
		# conjugate transpose of the (z, z+1) block; mp is its adjoint.
		mm = 0.5 * (hz_ll(p[1], -1, b, n, params, **kwds) + hz_ll(p[0], 1, b, n, params, **kwds).conjugate().transpose())
		mp = mm.conjugate().transpose()
		rows = p[0] * nbands + rows0
		cols = p[1] * nbands + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(mp.flatten())
		rows = p[1] * nbands + rows0
		cols = p[0] * nbands + cols0
		allrows.append(rows)
		allcols.append(cols)
		allvals.append(mm.flatten())

	# Drop exact zeros before building the sparse matrix.
	non0 = (np.array(allvals).flatten() != 0)
	s = coo_matrix(
		(np.array(allvals).flatten()[non0], (np.array(allrows).flatten()[non0], np.array(allcols).flatten()[non0])),
		shape = (nbands * nz, nbands * nz), dtype = complex)

	return s
+
+
def hzy_sparse_worker(p, kx, b, params, periodicy, rows0, cols0, **kwds):
	"""Sparse matrix constructor for Hamiltonian H(kx, y, z) ('1D'); version without magnetic field.

	Arguments:
	p          List [z, z + dz, dz] identifying the pair of z layers handled
	           by this worker (see neighbourpairsz()).
	kx         Momentum along x.
	b          IGNORED in this field-free version.
	params     Physical-parameters instance (ny, nz, norbitals).
	periodicy  If True, use periodic boundary conditions in y.
	rows0      Row indices within one (norb x norb) block, flattened.
	cols0      Column indices within one (norb x norb) block, flattened.
	kwds       Passed through to hzy().

	Returns:
	SciPy COO matrix of shape (norb * ny * nz, norb * ny * nz) containing the
	contribution of this pair of z layers, looping over all y.
	"""
	ny = params.ny
	nz = params.nz
	norb = params.norbitals
	norbnz = norb * nz
	allrows = []
	allcols = []
	allvals = []

	# diagonal block in z
	if p[2] == 0:
		# blocks diagonal in both z and y
		for y in range(0, ny):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 1 else 0
			m = hzy(p[0], 0, y, 0, kx, params, boundary, **kwds)
			rows = y * norbnz + p[0] * norb + rows0
			cols = y * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(m.flatten())
		# blocks off-diagonal in y; Hermitian-symmetrized pairwise (mym, myp)
		for y in range(0, ny - 1):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 2 else 0
			mym = 0.5 * (hzy(p[0], 0, y + 1, -1, kx, params, boundary, **kwds) +
			             hzy(p[0], 0, y, 1, kx, params, boundary, **kwds).conjugate().transpose())
			# if y % 47 == 0:
			# 	herm_check(hzy(p[0], 0, y + 1, -1, kx, params, boundary, **kwds), hzy(p[0], 0, y, 1, kx, params, boundary, **kwds), p[0], y)
			myp = mym.conjugate().transpose()
			rows = y       * norbnz + p[0] * norb + rows0
			cols = (y + 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(myp.flatten())
			rows = (y + 1) * norbnz + p[0] * norb + rows0
			cols = y       * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mym.flatten())
		if periodicy:  # if boundary conditions are periodic:
			# wrap-around block connecting y = ny - 1 and y = 0
			mym = 0.5 * (hzy(p[0], 0, 0, -1, kx, params, **kwds) + hzy(p[0], 0, ny - 1, 1, kx, params, **kwds).conjugate().transpose())
			myp = mym.conjugate().transpose()
			rows = (ny - 1) * norbnz + p[0] * norb + rows0
			cols = 0        * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(myp.flatten())
			rows = 0        * norbnz + p[0] * norb + rows0
			cols = (ny - 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mym.flatten())

	# off diagonal blocks
	elif p[2] == 1:
		# blocks off-diagonal in z, diagonal in y
		for y in range(0, ny):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 1 else 0
			mzm = 0.5 * (hzy(p[1], -1, y, 0, kx, params, boundary, **kwds) +
			             hzy(p[0], 1, y, 0, kx, params, boundary, **kwds).conjugate().transpose())
			mzp = mzm.conjugate().transpose()
			rows = y * norbnz + p[0] * norb + rows0
			cols = y * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzp.flatten())
			rows = y * norbnz + p[1] * norb + rows0
			cols = y * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzm.flatten())
		# blocks off-diagonal in z and y (for kz ky terms)
		for y in range(0, ny - 1):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 2 else 0
			mzmym = 0.5 * (hzy(p[1], -1, y + 1, -1, kx, params, boundary, **kwds) +
			               hzy(p[0], 1, y, 1, kx, params, boundary, **kwds).conjugate().transpose())
			mzpym = 0.5 * (hzy(p[0], 1, y + 1, -1, kx, params, boundary, **kwds) +
			               hzy(p[1], -1, y, 1, kx, params, boundary, **kwds).conjugate().transpose())
			# if y % 47 == 0:
			# 	herm_check(hzy(p[1], -1, y + 1, -1, kx, params, boundary, **kwds), hzy(p[0], 1, y, 1, kx, params, boundary, **kwds), p[0], y)
			# 	herm_check(hzy(p[0], 1, y + 1, -1, kx, params, boundary, **kwds), hzy(p[1], -1, y, 1, kx, params, boundary, **kwds), p[0], y)
			mzpyp = mzmym.conjugate().transpose()
			mzmyp = mzpym.conjugate().transpose()
			rows = y       * norbnz + p[0] * norb + rows0
			cols = (y + 1) * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpyp.flatten())
			rows = (y + 1) * norbnz + p[0] * norb + rows0
			cols = y       * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpym.flatten())
			rows = y       * norbnz + p[1] * norb + rows0
			cols = (y + 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmyp.flatten())
			rows = (y + 1) * norbnz + p[1] * norb + rows0
			cols = y       * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmym.flatten())
		if periodicy:  # if boundary conditions are periodic:
			# wrap-around blocks connecting y = ny - 1 and y = 0
			mzmym = 0.5 * (hzy(p[1], -1, 0, -1, kx, params, **kwds) + hzy(p[0], 1, ny - 1, 1, kx, params, **kwds).conjugate().transpose())
			mzpym = 0.5 * (hzy(p[0], 1, 0, -1, kx, params, **kwds) + hzy(p[1], -1, ny - 1, 1, kx, params, **kwds).conjugate().transpose())
			mzpyp = mzmym.conjugate().transpose()
			mzmyp = mzpym.conjugate().transpose()
			rows = (ny - 1) * norbnz + p[0] * norb + rows0
			cols = 0        * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpyp.flatten())
			rows = 0        * norbnz + p[0] * norb + rows0
			cols = (ny - 1) * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpym.flatten())
			rows = (ny - 1) * norbnz + p[1] * norb + rows0
			cols = 0        * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmyp.flatten())
			rows = 0        * norbnz + p[1] * norb + rows0
			cols = (ny - 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmym.flatten())
	allvals = (np.array(allvals).flatten())

	# Drop exact zeros before building the sparse matrix.
	non0 = (allvals != 0)
	s = coo_matrix(
		(allvals[non0], (np.array(allrows).flatten()[non0], np.array(allcols).flatten()[non0])),
		shape=(norb * ny * nz, norb * ny * nz), dtype=complex)

	return s
+
def hzy_sparse_magn_worker(p, kx, b, params, periodicy, rows0, cols0, **kwds):
	"""Sparse matrix constructor for Hamiltonian H(kx, y, z) ('1D'); version with magnetic field.

	Builds the COO triplets (rows, columns, values) for the pair of z
	coordinates given by p and assembles them into one sparse matrix.
	Hermiticity of each hopping block is enforced explicitly by averaging it
	with the conjugate transpose of the opposite hopping.

	Arguments:
	p          Sequence of three integers; p[0] and p[1] are z indices and
	           p[2] their separation (0: diagonal in z, 1: nearest
	           neighbours). Presumably an element of neighbourpairsz() --
	           compare hz_block_diag below; confirm against the caller.
	kx         Momentum along x; passed on to hzy_magn().
	b          Magnetic field; passed on to hzy_magn().
	params     SysParams instance; ny, nz, and norbitals are read from it.
	periodicy  True or False. Whether the y direction has periodic boundary
	           conditions.
	rows0      Flattened row indices within one (norb x norb) orbital block;
	           presumably np.indices((norb, norb))[0].flatten() -- see
	           hz_block_diag.
	cols0      Flattened column indices within one orbital block (analogous).
	kwds       Keyword arguments passed on to hzy_magn().

	Returns:
	A SciPy sparse COO matrix of shape (norb * ny * nz, norb * ny * nz).
	"""
	ny = params.ny
	nz = params.nz
	norb = params.norbitals
	# s = dok_matrix((norb * ny * nz,norb * ny * nz), dtype = complex)
	norbnz = norb * nz  # linear size of one y slice of the lattice
	allrows = []
	allcols = []
	allvals = []

	# diagonal block in z
	if p[2] == 0:
		# blocks diagonal in both y and z
		for y in range(0, ny):
			# boundary flags edge rows (-1: lower edge, 1: upper edge) unless periodic
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 1 else 0
			m = hzy_magn(p[0], 0, y, 0, kx, b, params, boundary, **kwds)
			# print ("HERM (y,y) w/ B:", np.amax(np.abs(m - m.conjugate().transpose())))
			rows = y * norbnz + p[0] * norb + rows0
			cols = y * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(m.flatten())
		# blocks off-diagonal in y (hopping between neighbours y and y + 1)
		for y in range(0, ny - 1):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 2 else 0
			# symmetrized average of the two opposite hoppings enforces hermiticity
			mym = 0.5 * (hzy_magn(p[0], 0, y + 1, -1, kx, b, params, boundary, **kwds) +
			             hzy_magn(p[0], 0, y, 1, kx, b, params, boundary, **kwds).conjugate().transpose())
			myp = mym.conjugate().transpose()
			# if y % 47 == 0:
			# 	print("dy")
			# 	herm_check(hzy_magn(p[0], 0, y + 1, -1, kx, b, params, boundary, **kwds), hzy_magn(p[0], 0, y, 1, kx, b, params, boundary, **kwds), p[0], y)
			rows = y       * norbnz + p[0] * norb + rows0
			cols = (y + 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(myp.flatten())
			rows = (y + 1) * norbnz + p[0] * norb + rows0
			cols =  y      * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mym.flatten())
		if periodicy:  # if boundary conditions are periodic:
			# wrap-around hopping between y = ny - 1 and y = 0
			mym = 0.5 * (hzy_magn(p[0], 0, 0, -1, kx, b, params, **kwds) +
			             hzy_magn(p[0], 0, ny - 1, 1, kx, b, params, **kwds).conjugate().transpose())
			myp = mym.conjugate().transpose()
			rows = (ny - 1) * norbnz + p[0] * norb + rows0
			cols = 0        * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(myp.flatten())
			rows = 0        * norbnz + p[0] * norb + rows0
			cols = (ny - 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mym.flatten())

	# off diagonal blocks
	elif p[2] == 1:
		# blocks off-diagonal in z, diagonal in y
		for y in range(0, ny):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 1 else 0
			mzm = 0.5 * (hzy_magn(p[1], -1, y, 0, kx, b, params, boundary, **kwds) +
			             hzy_magn(p[0], 1, y, 0, kx, b, params, boundary, **kwds).conjugate().transpose())
			mzp = mzm.conjugate().transpose()
			# if y % 47 == 0:
			# 	print("dz")
			# 	herm_check(hzy_magn(p[1], -1, y, 0, kx, b, params, boundary, **kwds), hzy_magn(p[0], 1, y, 0, kx, b, params, boundary, **kwds), p[0], y)
			rows = y * norbnz + p[0] * norb + rows0
			cols = y * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzp.flatten())
			rows = y * norbnz + p[1] * norb + rows0
			cols = y * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzm.flatten())
		# blocks off-diagonal in z and y (for kz ky terms)
		for y in range(0, ny - 1):
			boundary = 0 if periodicy else -1 if y == 0 else 1 if y == ny - 2 else 0
			mzmym = 0.5 * (hzy_magn(p[1], -1, y + 1, -1, kx, b, params, boundary, **kwds) +
			               hzy_magn(p[0], 1, y, 1, kx, b, params, boundary, **kwds).conjugate().transpose())
			mzpym = 0.5 * (hzy_magn(p[0], 1, y + 1, -1, kx, b, params, boundary, **kwds) +
			               hzy_magn(p[1], -1, y, 1, kx, b, params, boundary, **kwds).conjugate().transpose())
			# if y % 47 == 0:
			# 	print("dz dy")
			# 	herm_check(hzy_magn(p[1], -1, y + 1, -1, kx, b, params, boundary, **kwds), hzy_magn(p[0], 1, y, 1, kx, b, params, boundary, **kwds), p[0], y)
			# 	herm_check(hzy_magn(p[0], 1, y + 1, -1, kx, b, params, boundary, **kwds), hzy_magn(p[1], -1, y, 1, kx, b, params, boundary, **kwds), p[0], y)
			mzpyp = mzmym.conjugate().transpose()
			mzmyp = mzpym.conjugate().transpose()
			# the four combinations (z +/-, y +/-) fill four blocks
			rows = y       * norbnz + p[0] * norb + rows0
			cols = (y + 1) * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpyp.flatten())
			rows = (y + 1) * norbnz + p[0] * norb + rows0
			cols = y       * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpym.flatten())
			rows =  y      * norbnz + p[1] * norb + rows0
			cols = (y + 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmyp.flatten())
			rows = (y + 1) * norbnz + p[1] * norb + rows0
			cols =  y      * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmym.flatten())
		if periodicy:  # if boundary conditions are periodic:
			# wrap-around hoppings between y = ny - 1 and y = 0
			mzmym = 0.5 * (hzy_magn(p[1], -1, 0, -1, kx, b, params, **kwds) +
			               hzy_magn(p[0], 1, ny - 1, 1, kx, b, params, **kwds).conjugate().transpose())
			mzpym = 0.5 * (hzy_magn(p[0], 1, 0, -1, kx, b, params, **kwds) +
			               hzy_magn(p[1], -1, ny - 1, 1, kx, b, params, **kwds).conjugate().transpose())
			mzpyp = mzmym.conjugate().transpose()
			mzmyp = mzpym.conjugate().transpose()
			rows = (ny - 1) * norbnz + p[0] * norb + rows0
			cols = 0        * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpyp.flatten())
			rows = 0        * norbnz + p[0] * norb + rows0
			cols = (ny - 1) * norbnz + p[1] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzpym.flatten())
			rows = (ny - 1) * norbnz + p[1] * norb + rows0
			cols = 0        * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmyp.flatten())
			rows = 0        * norbnz + p[1] * norb + rows0
			cols = (ny - 1) * norbnz + p[0] * norb + cols0
			allrows.append(rows)
			allcols.append(cols)
			allvals.append(mzmym.flatten())

	allvals = (np.array(allvals).flatten())

	# keep only nonzero values when assembling the sparse matrix
	non0 = (allvals != 0)
	s = coo_matrix(
		(allvals[non0], (np.array(allrows).flatten()[non0], np.array(allcols).flatten()[non0])),
		shape=(norb * ny * nz, norb * ny * nz), dtype=complex)

	return s
+
+
def hz_sparse_ll_full(h_sym, ll_max, magn, norb = 8, all_dof = False, is_hermitian = True):
	"""Sparse matrix constructor for LL Hamiltonian H(kx, ky, z) in full LL mode.
	This matrix contains all LLs up to index ll_max.

	Arguments:
	h_sym          Symbolic hamiltonian or operator
	ll_max         Maximal LL index
	magn           Magnetic field (vector or z component)
	norb           Number of orbitals in kp model
	all_dof        True or False. Whether to include 'unphysical' degrees of
	               freedom for the lower LL indices. If False, reduce the matrix
	               by eliminating all 'unphysical' degrees of freedom, which
	               should be characterized by all zeros in the respective rows
	               and columns. If set to True, then keep everything, and
	               preserve the shape of the matrix.
	is_hermitian   True or False. If input h_sym is hermitian, this can be used
	               speed up construction, as hermicity is enforced by
	               conjugation. If used to construct an (possibly non-hermitian)
	               operator, e.g., for transition matrices, then all
	               off-diagonals are calculated separately.
	"""
	# Per-orbital LL index offsets; sign taken from the z component of the field.
	delta_n_vec = delta_n_ll(norb, magn.z() if isinstance(magn, Vector) else magn)
	if h_sym.dim % norb != 0:
		raise ValueError("Size of input matrix not compatible with number of orbitals")
	nz = h_sym.dim // norb
	if not all_dof:
		# count only physical degrees of freedom (those with n + delta_n >= 0) per LL
		sizes = nz * np.array([np.count_nonzero(n + delta_n_vec >= 0) for n in range(-2, ll_max + 1)])
	else:
		sizes = h_sym.dim * np.ones(ll_max + 3, dtype = int)
	# indices[n+2] is the offset of LL block n in the full matrix
	indices = np.concatenate(([0, ], np.cumsum(sizes)))
	# print ("Sizes", sizes)
	# print ("Indices", indices)
	allrows = []
	allcols = []
	allvals = []
	for n in range(-2, ll_max + 1):
		# only pairs with 0 <= m - n <= 4 are evaluated (upper triangle of the LL grid)
		for m in range(n, min(ll_max + 1, n + 5)):
			ham = h_sym.ll_evaluate((n, m), magn, delta_n_vec, all_dof = all_dof)
			if not issparse(ham):
				ham = csc_matrix(ham)
			# presumably drops entries with magnitude below 1e-10; confirm in its definition
			sparse_eliminate_zeros(ham, 1e-10)
			# print ('H(%i,%i): %s %s %s %i %s' % (n, m, type(ham), ham.shape, (sizes[n+2], sizes[m+2]), ham.nnz, np.max(np.abs(ham)) > 1e-10))
			if ham.nnz > 0:
				# store data in full hamiltonian
				hamcoo = ham.tocoo()
				allrows.append(indices[n+2] + hamcoo.row)
				allcols.append(indices[m+2] + hamcoo.col)
				allvals.append(hamcoo.data)
				if (m > n) and is_hermitian:  # add conjugate (lower-triangular block)
					allrows.append(indices[m+2] + hamcoo.col)
					allcols.append(indices[n+2] + hamcoo.row)
					allvals.append(np.conjugate(hamcoo.data))
			if not is_hermitian and (m > n):  # add lower-triangular block in non-hermitian case
				# evaluate block (m, n) explicitly instead of conjugating block (n, m)
				ham = h_sym.ll_evaluate((m, n), magn, delta_n_vec, all_dof = all_dof)
				if not issparse(ham):
					ham = csc_matrix(ham)
				sparse_eliminate_zeros(ham, 1e-10)
				if ham.nnz > 0:
					hamcoo = ham.tocoo()
					allrows.append(indices[m+2] + hamcoo.row)
					allcols.append(indices[n+2] + hamcoo.col)
					allvals.append(hamcoo.data)

	dim = indices[-1]
	if len(allvals) == 0:
		return csc_matrix((dim, dim), dtype = complex)  # zero
	allrows = np.concatenate(allrows)
	allcols = np.concatenate(allcols)
	allvals = np.concatenate(allvals)
	s = coo_matrix((allvals, (allrows, allcols)), shape = (dim, dim), dtype = complex)
	return s.tocsc()
+
def hz_block_diag(h_block, params, **kwds):
	"""Sparse matrix constructor for block-diagonal matrices.
	This function expands a matrix block (h_block) to the desired size
	norb * nz (the returned matrix has shape (norb * nz, norb * nz); for 1D
	geometries, the caller tiles it along y, cf. hsplit_full). This function is
	faster than scipy.sparse.block_diag. It can achieve this because we are
	repeating the same matrix, whereas the SciPy function takes a sequence of
	possibly different matrices.

	Arguments:
	h_block   Function that calculates the block. It should return a matrix of
	          shape (norb, norb), where norb = params.norbitals, either 6 or 8.
	params    SysParams instance. Used to extract nz and norb, and is passed as
	          an argument to the function h_block.
	kwds      Keyword arguments passed to the function h_block.

	Returns:
	A SciPy sparse CSC matrix.
	"""
	nz = params.nz
	norb = params.norbitals
	nbpairs = neighbourpairsz(nz)
	# Row/column index pattern within one (norb x norb) block.
	indices0 = np.indices((norb, norb))
	rows0 = indices0[0].flatten()
	cols0 = indices0[1].flatten()
	allrows = []
	allcols = []
	allvals = []
	for p in nbpairs:
		# diagonal blocks only; a block-diagonal matrix has no hopping terms
		if p[2] == 0:
			m = h_block(p[0], params, **kwds)
			allrows.append(p[0] * norb + rows0)
			allcols.append(p[0] * norb + cols0)
			allvals.append(m.flatten())

	# Flatten each triplet array once; the original recomputed
	# np.array(allvals).flatten() twice (for the mask and for the data).
	vals = np.array(allvals).flatten()
	rows = np.array(allrows).flatten()
	cols = np.array(allcols).flatten()
	non0 = (vals != 0)  # drop explicit zeros before assembling
	s = coo_matrix(
		(vals[non0], (rows[non0], cols[non0])),
		shape=(norb * nz, norb * nz), dtype=complex)

	return s.tocsc()
+
def hsplit_full(params, splittype = 'auto', k = None, kdim = None, bia = False, lattice_reg = False):
	"""Sparse matrix for degeneracy splitting Hamiltonian.
	Construct 'splitting' Hamiltonian based on splittype argument and a few
	other parameters. In order to get the appropriate strength, the result can
	be multiplied by an appropriate coefficient afterwards.

	Arguments:
	params       SysParams instance
	splittype    String. The type of degeneracy lifting. Must be one of:
	             'automatic', 'auto', 'sgnjz', 'sgnjz0', 'bia', 'helical',
	             'helical0', 'cross', 'cross0', 'isopz'
	k            None or list. Momentum value, needed for 'bia', 'helical', and
	             'cross' types.
	kdim         None, 1, 2, or 3. The number of momentum dimensions. If None,
	             take it from params.kdim. Specify it explicitly to override the
	             value params.kdim.
	bia          True or False. If bulk inversion asymmetry is present. This
	             determines the type if the type is set to 'auto' or
	             'automatic'.
	lattice_reg  True or False. If lattice regularization should be taken into
	             account. Only affects types 'helical', 'helical0', 'cross', and
	             'cross0'.

	Returns:
	A SciPy sparse CSC matrix.
	"""
	splittypes = ['automatic', 'auto', 'sgnjz', 'sgnjz0', 'bia', 'helical', 'helical0', 'cross', 'cross0', 'isopz', 'isopzw', 'isopzs']
	# 'auto'/'automatic' resolves to 'bia' if BIA is present, else 'sgnjz'
	if splittype == 'automatic' or splittype == 'auto':
		splittype = 'bia' if bia else 'sgnjz'
	elif splittype not in splittypes:
		sys.stderr.write("ERROR (hsplit_full): Splitting type must be one of %s.\n" % (", ".join(splittypes)))
		exit(1)

	if kdim is None:
		kdim = params.kdim

	# 3D (bulk): the splitting term acts on orbital space only (single lattice point)
	if kdim == 3:
		if splittype == 'sgnjz':
			return hsplit(0, params)
		elif splittype == 'sgnjz0':
			return hsplit_zero(0, params, k = k)
		elif splittype == 'bia':
			return hsplit_bia(0, params, k = k)
		elif splittype in ['helical', 'helical0', 'cross', 'cross0']:
			# pad momentum with zeros to three components
			if isinstance(k, list) and len(k) < 3:
				k = k + [0] * (3 - len(k))
			cross = splittype.startswith('cross')
			zerosplit = splittype.endswith('0')
			return hsplit_helical(0, params, k = k, lattice_reg = lattice_reg, cross = cross, zerosplit = zerosplit)
		elif splittype in ['isopz', 'isopzw', 'isopzs']:
			return parity_z(1, 1, params.norbitals, isoparity = True)
		else:
			raise ValueError("Invalid value for variable 'splittype'.")

	# 2D/1D isoparity types: use the (iso)parity operator in z directly
	if splittype == 'isopz':
		ny = 1 if kdim == 2 else params.ny
		return parity_z(params.nz, ny, params.norbitals, isoparity = True)
	if splittype in ['isopzw', 'isopzs']:
		ny = 1 if kdim == 2 else params.ny
		# restrict to the well ('isopzw') or the symmetric part ('isopzs') of the stack
		zrange = params.well_z() if splittype == 'isopzw' else params.symmetric_z()
		if zrange[0] is None or zrange[1] is None:
			zrange = None
		return parity_z(params.nz, ny, params.norbitals, isoparity = True, zrange = zrange)

	# 2D/1D other types: expand the per-site block along the z direction
	if splittype == 'sgnjz':
		ham_split_z = hz_block_diag(hsplit, params)
	elif splittype == 'sgnjz0':
		ham_split_z = hz_block_diag(hsplit_zero, params, k = k)
	elif splittype == 'bia':
		ham_split_z = hz_block_diag(hsplit_bia, params, k = k)
	elif splittype in ['helical', 'helical0', 'cross', 'cross0']:
		if isinstance(k, list) and len(k) < 3:
			k = k + [0] * (3 - len(k))
		cross = splittype.startswith('cross')
		zerosplit = splittype.endswith('0')
		ham_split_z = hz_block_diag(hsplit_helical, params, k = k, lattice_reg = lattice_reg, cross = cross, zerosplit = zerosplit)
	else:
		raise ValueError("Invalid value for variable 'splittype'.")
	if kdim == 2:
		return ham_split_z
	elif kdim == 1:
		# 1D: repeat the z block ny times along the block diagonal
		hz_coo = ham_split_z.tocoo()
		ny = params.ny
		hz_shape = hz_coo.shape[0]
		hzy_shape = hz_shape * ny
		hzy_indices = np.arange(0, ny) * hz_shape
		hzy_row = np.tile(hz_coo.row, ny) + np.repeat(hzy_indices, hz_coo.nnz)
		hzy_col = np.tile(hz_coo.col, ny) + np.repeat(hzy_indices, hz_coo.nnz)
		hzy_data = np.tile(hz_coo.data, ny)
		hzy_coo = coo_matrix((hzy_data, (hzy_row, hzy_col)), shape = (hzy_shape, hzy_shape))
		return hzy_coo.tocsc()
	else:
		raise ValueError("Variable 'kdim' must be 1, 2, or 3.")
+
def hsplit_ll_full(ll_max, nz, norb = 8):
	"""Sparse diagonal matrix carrying the LL index n on the diagonal of each
	LL block, for Hamiltonian Hsplit(kx, ky, z) in full LL mode."""
	# TODO: Why does this need to be separate from hz_sparse_ll_full?

	# Sign of the magnetic field is irrelevant here; only counts are needed.
	delta_n_vec = delta_n_ll(norb)
	blocksizes = nz * np.array([np.count_nonzero(n + delta_n_vec >= 0) for n in range(-2, ll_max + 1)])
	if blocksizes.size == 0:
		return csc_matrix((0, 0), dtype = complex)  # no LL blocks: zero-size matrix

	dim = int(np.sum(blocksizes))
	# Diagonal value equals the LL index n, constant within each block;
	# blocks are contiguous, so the diagonal positions are simply 0..dim-1.
	diagvals = np.concatenate([np.full(sz, n, dtype = complex)
	                           for n, sz in zip(range(-2, ll_max + 1), blocksizes)])
	diagidx = np.arange(0, dim)
	s = coo_matrix((diagvals, (diagidx, diagidx)), shape = (dim, dim), dtype = complex)
	return s.tocsc()
+
def hz_sparse_split(k, b, params, split = 0.0, splittype = 'auto', bia = False, lattice_reg = False, **kwds):
	"""Thin wrapper combining hz_sparse with the splitting term from hsplit_full.

	Useful as argument for the SymbolicHamiltonian constructor and other
	functions that require the combination to be a single callable.
	"""
	ham = hz_sparse(k, b, params, bia = bia, lattice_reg = lattice_reg, **kwds)
	if split == 0.0:
		return ham
	return ham + split * hsplit_full(params, splittype, k = k, kdim = 2, bia = bia, lattice_reg = lattice_reg)
+
def hbulk_split(k, b, params, split = 0.0, splittype = 'auto', bia = False, lattice_reg = False, **kwds):
	"""Thin wrapper combining hbulk with the splitting term from hsplit_full.

	Useful as argument for the SymbolicHamiltonian constructor and other
	functions that require the combination to be a single callable.
	"""
	ham = hbulk(k, b, params, bia = bia, lattice_reg = lattice_reg, **kwds)
	if split == 0.0:
		return ham
	return ham + split * hsplit_full(params, splittype, k = k, kdim = 3, bia = bia, lattice_reg = lattice_reg)
+
+
def hz_sparse_pot(params, pot, norb = None):
	"""Sparse constructor for a potential in z direction.

	Arguments:
	params   SysParams instance; nz and (if norb is None) norbitals are read
	         from it.
	pot      Array of shape (nz,), one potential value per z coordinate, or of
	         shape (nz, m) with m >= norb for per-orbital values (columns
	         beyond norb are ignored).
	norb     Integer or None. Number of orbitals; if None, use
	         params.norbitals.

	Returns:
	A SciPy sparse CSC matrix of shape (norb * nz, norb * nz) with the
	potential on the diagonal; the orbital index runs fastest.
	"""
	nz = params.nz
	if norb is None:
		norb = params.norbitals

	pot = np.asarray(pot)
	if pot.shape == (nz,):
		# one value per z coordinate: repeat for each orbital
		diag = np.repeat(pot, norb)
	elif pot.ndim == 2 and pot.shape[0] == nz and pot.shape[1] >= norb:
		# per-orbital potential: keep only the first norb columns
		diag = pot[:, :norb].flatten()
	else:
		# Report the actual shape; the original formatted len(pot), which
		# raises TypeError for 0-dimensional input instead of this message.
		sys.stderr.write("ERROR (hz_sparse_pot): Potential vector has incorrect shape/size: %s where (%i,) or (%i, >=%i) expected\n" % (pot.shape, nz, nz, norb))
		exit(1)
	# Both branches produce a 1D diagonal; wrap once for dia_matrix
	# (the original built a 2D array in one branch and a 1D one in the other).
	offsets = np.array([0,])
	s = dia_matrix((np.array([diag]), offsets), shape = (norb * nz, norb * nz))
	return s.tocsc()
+
def hz_sparse_pot_ll_full(params, ll_max, pot, norb = None):
	"""Sparse constructor for a potential in z direction for full LL mode.

	Arguments:
	params   SysParams instance; nz and (if norb is None) norbitals are read
	         from it.
	ll_max   Integer. Maximal LL index; blocks run over n = -2, ..., ll_max.
	pot      One-dimensional array of length nz; one potential value per z
	         coordinate. Per-orbital potentials are not supported here.
	norb     Integer or None. Number of orbitals; if None, use
	         params.norbitals.

	Returns:
	A SciPy sparse CSC matrix with the potential repeated on the diagonal of
	every LL block.
	"""
	nz = params.nz
	if norb is None:
		norb = params.norbitals

	# Error messages now name this function; the original said 'hz_sparse_pot'.
	if len(pot) != nz:
		sys.stderr.write("ERROR (hz_sparse_pot_ll_full): Potential vector has incorrect size\n")
		exit(1)
	elif np.asarray(pot).ndim != 1:
		sys.stderr.write("ERROR (hz_sparse_pot_ll_full): Per-orbital potential not (yet) supported in full LL mode.\n")
		exit(1)

	delta_n_vec = delta_n_ll(norb)  # (sign of) magnetic field not required (since we only need count)
	# Number of active orbital degrees of freedom per LL index n.
	sizes_norb_ll = np.array([np.count_nonzero(n + delta_n_vec >= 0) for n in range(-2, ll_max + 1)])
	# Repeat each z value over the orbital degrees of freedom of each LL block.
	data = np.concatenate([np.repeat(pot, sizes_norb_ll[n+2]) for n in range(-2, ll_max + 1)])
	size = nz * np.sum(sizes_norb_ll)
	offsets = np.array([0,])
	s = dia_matrix((data, offsets), shape = (size, size))
	return s.tocsc()
+
+# Wrappper for parallel matrix constructions:
+
def hz_sparse(k, b, params, solver = None, **kwds):
	"""Compatibility wrapper: build H(kx, ky, z) via the parallel constructor h_constructor."""
	return h_constructor(hz_sparse_worker, k, b, params, solver = solver, **kwds)
+
def hz_sparse_ll(b, n, params, solver = None, **kwds):
	"""Compatibility wrapper: build the LL Hamiltonian via the parallel constructor h_constructor."""
	return h_constructor(hz_sparse_ll_worker, b, n, params, solver = solver, **kwds)
+
def hzy_sparse(kx, b, params, periodicy = False, solver = None, **kwds):
	"""Compatibility wrapper: build H(kx, y, z) via the parallel constructor h_constructor."""
	return h_constructor(hzy_sparse_worker, kx, b, params, periodicy = periodicy, solver = solver, **kwds)
+
def hzy_sparse_magn(kx, b, params, periodicy = False, solver = None, **kwds):
	"""Compatibility wrapper: build H(kx, y, z) with magnetic field via the parallel constructor h_constructor."""
	return h_constructor(hzy_sparse_magn_worker, kx, b, params, periodicy = periodicy, solver = solver, **kwds)
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/parity.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/parity.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef76bca66c503d12efeaaf7f9f56f5fdb657c6ee
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/parity.py
@@ -0,0 +1,200 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+from scipy.sparse import coo_matrix
+
+### PARITY OPERATORS ###
+
def parity_z(arg1, arg2 = None, norb = 6, isoparity = False, zrange = None):
	"""Parity operator in z: z \\mapsto -z

	The first two arguments control the size of the matrix; they may be one of
	the following combinations:
	arg1 = nz, arg2 = ny       Set linear matrix size to nz * ny * norb
	arg1 = nz, arg2 = None     Set linear matrix size to nz * norb
	arg1 = param, arg2 = None  Get nz and ny from arg1.nz and arg1.ny (where
	                           arg1 is a PhysParams instance)

	Remaining arguments:
	norb       Integer. Number of orbitals, should be 6 or 8.
	isoparity  True or False. If True, include signs for spin/orbital part; we
	           call the resulting operator 'isoparity'.
	zrange     2-tuple or None. If a 2-tuple, the range of z coordinates where
	           to apply the operator z' -> -z', where z' = z - m with m the
	           middle of the range. Both start and end point are inclusive. If
	           None (default), apply z -> -z to the complete layer stack.

	Returns:
	A scipy.sparse.csc_matrix instance.
	"""
	# Resolve the flexible size arguments into (nz, ny).
	int_types = (int, np.integer)
	if isinstance(arg1, int_types) and isinstance(arg2, int_types):
		nz, ny = arg1, arg2
	elif isinstance(arg1, int_types) and arg2 is None:
		nz, ny = arg1, 1
	elif arg2 is None and hasattr(arg1, 'nz') and hasattr(arg1, 'ny'):  # most likely a PhysParams instance
		nz, ny = arg1.nz, arg1.ny
	else:
		raise TypeError("Arguments must be PhysParam instance, a pair of integers nz, ny, or a single integer nz")

	# Range of z coordinates the reflection acts upon.
	if zrange is None:
		zidx = np.arange(0, nz)
	elif isinstance(zrange, tuple) and len(zrange) == 2:
		zidx = np.arange(zrange[0], zrange[1] + 1)
	else:
		raise TypeError("Argument zrange must be a 2-tuple or None.")
	nzi = len(zidx)

	# Per-orbital signs: +/-1 pattern for isoparity, all ones otherwise.
	if isoparity:
		if norb not in (6, 8):
			raise ValueError("Argument or variable norb should be 6 or 8")
		signs = np.array([1., -1., 1., -1., 1., -1., -1., 1.][:norb], dtype = complex)
		vals = np.tile(signs, nzi * ny)
	else:
		vals = np.ones(norb * nzi * ny, dtype = complex)

	# Build (y, z, orbital) index grids by broadcasting; columns use the
	# reversed z order, which implements the reflection within the range.
	orb = np.arange(norb)
	zrow = zidx * norb
	zcol = zidx[::-1] * norb
	yoff = np.arange(ny) * (norb * nz)
	rows = (yoff[:, None, None] + zrow[None, :, None] + orb[None, None, :]).ravel()
	cols = (yoff[:, None, None] + zcol[None, :, None] + orb[None, None, :]).ravel()

	m = coo_matrix((vals, (rows, cols)), shape = (norb * nz * ny, norb * nz * ny), dtype = complex)
	return m.tocsc()
+
def parity_x(arg1, arg2 = None, norb = 6, isoparity = False):
	"""Parity operator in x: x \\mapsto -x

	Arguments control the size of the matrix; they may be one of the following
	combinations:
	arg1 = nz, arg2 = ny       Set linear matrix size to nz * ny * norb
	arg1 = param, arg2 = None  Get nz and ny from arg1.nz and arg1.ny (where
	                           arg1 is a PhysParams instance)

	Remaining arguments:
	norb       Integer. Number of orbitals, should be 6 or 8.
	isoparity  True or False. If True, include signs for spin/orbital part; we
	           call the resulting operator 'isoparity'.

	Returns:
	A scipy.sparse.csc_matrix instance.
	"""
	# Resolve the flexible size arguments into (nz, ny).
	if isinstance(arg1, (int, np.integer)) and isinstance(arg2, (int, np.integer)):
		nz, ny = arg1, arg2
	elif arg2 is None and hasattr(arg1, 'nz') and hasattr(arg1, 'ny'):  # most likely a PhysParams instance
		nz, ny = arg1.nz, arg1.ny
	else:
		raise TypeError("Arguments must be PhysParam instance or pair of integers nz, ny.")

	ncells = nz * ny
	orbrows = np.arange(norb)
	if isoparity:
		if norb not in (6, 8):
			raise ValueError("Argument or variable norb should be 6 or 8")
		# orbital permutation and signs of the spin/orbital part
		orbcols = np.array([1, 0, 5, 4, 3, 2, 7, 6])[:norb]
		vals = np.tile(np.array([1., 1., 1., 1., 1., 1., -1., -1.][:norb], dtype = complex), ncells)
	else:
		# spatial part only: diagonal in the orbital index
		orbcols = orbrows
		vals = np.ones(norb * ncells, dtype = complex)

	# Same block pattern at each (z, y) lattice cell.
	base = np.arange(ncells) * norb
	rows = (base[:, None] + orbrows[None, :]).ravel()
	cols = (base[:, None] + orbcols[None, :]).ravel()

	m = coo_matrix((vals, (rows, cols)), shape = (norb * ncells, norb * ncells), dtype = complex)
	return m.tocsc()
+
def parity_y(arg1, arg2 = None, norb = 6, isoparity = False):
	"""Parity operator in y: y \\mapsto -y

	Arguments control the size of the matrix; they may be one of the following
	combinations:
	arg1 = nz, arg2 = ny       Set linear matrix size to nz * ny * norb
	arg1 = param, arg2 = None  Get nz and ny from arg1.nz and arg1.ny (where
	                           arg1 is a PhysParams instance)

	Remaining arguments:
	norb       Integer. Number of orbitals, should be 6 or 8.
	isoparity  True or False. If True, include signs for spin/orbital part; we
	           call the resulting operator 'isoparity'.

	Returns:
	A scipy.sparse.csc_matrix instance.
	"""
	# Resolve the flexible size arguments into (nz, ny).
	if isinstance(arg1, (int, np.integer)) and isinstance(arg2, (int, np.integer)):
		nz, ny = arg1, arg2
	elif arg2 is None and hasattr(arg1, 'nz') and hasattr(arg1, 'ny'):  # most likely a PhysParams instance
		nz, ny = arg1.nz, arg1.ny
	else:
		raise TypeError("Arguments must be PhysParam instance or pair of integers nz, ny.")

	orbrows = np.arange(norb)
	if isoparity:
		if norb not in (6, 8):
			raise ValueError("Argument or variable norb should be 6 or 8")
		# orbital permutation and (imaginary) signs of the spin/orbital part
		orbcols = np.array([1, 0, 5, 4, 3, 2, 7, 6])[:norb]
		vals = np.tile(np.array([-1.j, 1.j, 1.j, -1.j, 1.j, -1.j, 1.j, -1.j][:norb], dtype = complex), nz * ny)
	else:
		orbcols = orbrows
		vals = np.ones(norb * nz * ny, dtype = complex)

	# Diagonal in z; columns use the reversed y order (the reflection y -> -y).
	zoff = np.arange(nz) * norb
	yrow = np.arange(ny) * (norb * nz)
	ycol = yrow[::-1]
	rows = (yrow[:, None, None] + zoff[None, :, None] + orbrows[None, None, :]).ravel()
	cols = (ycol[:, None, None] + zoff[None, :, None] + orbcols[None, None, :]).ravel()

	m = coo_matrix((vals, (rows, cols)), shape = (norb * nz * ny, norb * nz * ny), dtype = complex)
	return m.tocsc()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/tools.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..d665a1cef557a2289665f97d1e4411e84a25d5c4
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/tools.py
@@ -0,0 +1,103 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+### FUNCTIONS FOR DEBUGGING AND EXTERNAL CALCULATION ###
+# Normally, these functions are not used
+
# ANSI terminal escape sequences for colour-coded diagnostic output.
_red = "\x1b[1;31m"    # bold red: flags a hermiticity error
_green = "\x1b[32m"    # green: flags a passing check
_reset = "\x1b[0m"     # reset all attributes

def herm_check(a, b, z = None, y = None, do_raise = False):
	"""Hermiticity check. Debugging function for use in the sparse matrix constructors.

	Prints the maximal absolute deviation |a - b^dagger| with a colour-coded
	verdict; on failure (deviation > 1e-8) also prints both matrices and a
	colour-highlighted table of the elementwise errors.

	Arguments:
	a, b      Matrices that need to be compared; a should equal the conjugate
	          transpose of b.
	z, y      If not None, data to be printed in front of the verdict (e.g.
	          coordinates identifying the block under test).
	do_raise  Whether to raise a ValueError if the hermiticity check fails.

	Returns:
	None
	"""
	if z is not None:
		print(z, end=' ')
	if y is not None:
		print(y, end=' ')
	# Compute the deviation a - b^dagger once; the original recomputed it
	# four times (for the maximum, the error table, and the final printout).
	diff = a - b.conjugate().transpose()
	herm_err = np.amax(np.abs(diff))
	print(herm_err, _red + 'HERMITICITY ERROR' + _reset if herm_err > 1e-8 else _green + 'Hermitian' + _reset)
	if herm_err > 1e-8:
		print(a)
		print(b.conjugate().transpose())
		errs = np.abs(diff)
		# Elementwise table; entries above the tolerance are highlighted in red.
		for row in np.asarray(errs):
			s = ""
			for x in row:
				s += (" " + _red + ("%.3e" % x) + _reset) if x > 1e-8 else " %.3e" % 0.0
			print(s)
		print(diff, 'a - bH')
		if do_raise:
			raise ValueError("Hamiltonian blocks not Hermitian")
+
def ham_write(h, filename, split_re_im = True):
	"""Write Hamiltonian to file in a simple text-based CSC format.

	Arguments:
	h            Matrix (any SciPy sparse format); converted to CSC here.
	filename     String. Output file name.
	split_re_im  If True, write real and imaginary parts as separate blocks
	             (RE== / IM==); if False, write complex values (VAL==).

	NOTE(review): the COLS/ROWS header lines are written from h.shape[0] and
	h.shape[1] respectively; for a SciPy matrix, shape[0] is the number of
	rows, so the labels look swapped. Confirm against the consumer of this
	format before changing the output.

	Returns:
	None
	"""
	h = h.tocsc()
	# 'with' guarantees the file is closed even if writing fails
	# (the original used open()/close() without try/finally).
	with open(filename, "w") as f:
		f.write("CSC_MATRIX\n")
		f.write("COLS=%i\n" % h.shape[0])
		f.write("ROWS=%i\n" % h.shape[1])
		f.write("NNZ=%i\n" % h.nnz)

		f.write("INDPTR==\n")
		for i in h.indptr:
			f.write("%i\n" % i)
		f.write("ROWIND==\n")
		for i in h.indices:
			f.write("%i\n" % i)
		if split_re_im:
			f.write("RE==\n")
			for x in np.real(h.data):
				f.write("0\n" if x == 0 else "%.19e\n" % x)
			f.write("IM==\n")
			for x in np.imag(h.data):
				f.write("0\n" if x == 0 else "%.19e\n" % x)
		else:
			f.write("VAL==\n")
			for x in h.data:
				f.write("%.19e + %.19ej\n" % (np.real(x), np.imag(x)))
		f.write("END\n")
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transform.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cd3fc8c508adfb0430a47a44ce4b1a1f4e860b5
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transform.py
@@ -0,0 +1,659 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+from itertools import permutations
+import re
+
class KJTensor(object):
	"""Container that encodes products of k (momentum) and J (angular momentum).
	The tensor values c_mnpq stand for sum_mnpq k_m k_n J_p J_q,
	where the number of momentum components is nk and J_p encode (abstract)
	angular momentum operators.

	Attributes:
	tensor   The tensor data. This is a multidimensional numpy array.
	order    The dimension (rank) of the tensor.
	shape    The shape of the tensor. For example (3, 3, 3, 3) if the
	         tensor indices can be x, y, z.
	nk       The number of indices that refer to momentum k. The other
	         (order - nk) indices refer to angular momentum operators J.
	"""
	def __init__(self, tens_data, nk, shape = None):
		if isinstance(tens_data, np.ndarray):
			self.tensor = tens_data
			self.order = tens_data.ndim
			self.shape = tens_data.shape
			if shape is not None:
				raise ValueError("Shape cannot be set separately if input is already a tensor-like object")
		elif isinstance(tens_data, dict):
			if len(tens_data) == 0:
				raise ValueError("Argument tens_data must not be an empty dict instance.")
			for t in tens_data:
				# Keys must consist of the letters x, y, z and/or digits only.
				# Bug fix: use re.fullmatch; re.match("[xyz0-9]*", t) matches a
				# (possibly empty) prefix of any string and thus never rejected
				# an invalid key.
				if re.fullmatch("[xyz0-9]*", t) is None:
					raise ValueError("Invalid tensor index in tens_data")
			orders = [len(t) for t in tens_data]
			if not all([o == orders[0] for o in orders]):
				raise ValueError("Tensor indices must be of equal length")
			self.order = orders[0]
			if shape is None:
				self.shape = (3,)*self.order
			elif not isinstance(shape, tuple):
				raise TypeError("Argument shape must be a tuple or None")
			elif len(shape) != self.order:
				raise TypeError("Tensor order mismatch between shape and data")
			else:
				self.shape = shape
			self.tensor = np.zeros(self.shape, dtype = complex)
			for t in tens_data:
				# Map letters to coordinate indices (x, y, z) -> (0, 1, 2);
				# digit characters are used as numeric indices directly.
				index = tuple([0 if c == 'x' else 1 if c == 'y' else 2 if c == 'z' else int(c) for c in t])
				self.tensor[index] = tens_data[t]
		else:
			raise TypeError("Argument tens_data must be a numpy array or a dict instance.")
		if not isinstance(nk, (int, np.integer)):
			raise TypeError("Argument nk must be an integer")
		if nk < 0 or nk > self.order:
			raise ValueError("Argument nk must be >= 0 and <= order.")
		self.nk = nk
		if not all([l in [1, 3, 5] for l in self.shape]):
			raise ValueError("Only tensors with 1-, 3-, and 5-dimensional axes are supported")

	def __str__(self):
		"""String representation of the tensor data."""
		return str(self.tensor)

	def chop(self, acc = 1e-13):
		"""Chop almost-zero values from tensors.

		Arguments:
		acc   Threshold below which absolute values are set to zero.

		Returns:
		The present KJTensor instance (modified in place).
		"""
		self.tensor[np.abs(self.tensor) < acc] = 0.0
		return self

	def symmetrize(self, axes = None, fill = True, in_place = False):
		"""Symmetrize the tensor over the specified axes.

		Arguments:
		axes      tuple containing the indices of the axes to be symmetrized.
		          If None or 'k', symmetrize over the momentum axes (0 .. nk-1).
		fill      If set to True, expand non-zero matrix elements to the
		          symmetric positions. Otherwise, do a proper symmetrization
		          involving 1/n! factors.
		in_place  If set to True, symmetrize the current instance, otherwise
		          return a new instance.

		Returns:
		KJTensor instance with symmetrized data.
		"""
		if axes is None or axes == 'k':
			axes = np.arange(0, self.nk, dtype = int)
		elif isinstance(axes, (tuple, list, set)):
			axes = np.array(axes)
		if not (isinstance(axes, np.ndarray) and axes.ndim == 1):
			raise TypeError("Argument indices must be a sequence-like object")

		# Enumerate all index combinations over the symmetrized axes and over
		# the remaining axes separately.
		symm_shape = np.array(self.shape)[axes]
		symm_indices = np.transpose(np.indices(symm_shape)[::-1]).reshape((np.prod(symm_shape), len(symm_shape)))
		other_axes = np.array([i for i in range(0, self.order) if i not in axes])
		if len(other_axes) == 0:
			other_shape = ()
			other_indices = np.array([None])
		else:
			other_shape = np.array(self.shape)[other_axes]
			other_indices = np.transpose(np.indices(other_shape)[::-1]).reshape((np.prod(other_shape), len(other_shape)))
		# First pass: collect values keyed by the sorted ("ordered") index over
		# the symmetrized axes.
		symm_elements = {}
		for s in symm_indices:
			full_index = np.zeros(self.order, dtype = int)
			ordered_index = np.zeros(self.order, dtype = int)
			full_index[axes] = s
			ordered_index[axes] = np.sort(s)
			for o in other_indices:
				if o is not None:
					full_index[other_axes] = o
					ordered_index[other_axes] = o
				if np.abs(self.tensor[tuple(full_index)]) > 1e-13:
					if tuple(ordered_index) not in symm_elements:
						symm_elements[tuple(ordered_index)] = self.tensor[tuple(full_index)]
					elif fill and np.abs(symm_elements[tuple(ordered_index)] - self.tensor[tuple(full_index)]) < 1e-13:
						pass
					elif fill:
						raise ValueError("Incompatible tensor elements in symmetrization using 'fill' method [T%s != %s]" % (tuple(full_index), symm_elements[tuple(ordered_index)]))
					else:
						symm_elements[tuple(ordered_index)] += self.tensor[tuple(full_index)]
		# Second pass: distribute the collected values over all permutations of
		# the symmetrized axes; for a proper symmetrization (fill = False),
		# divide by the number of distinct permutations.
		new_tensor = np.zeros(self.shape, dtype = complex)
		for s in symm_indices:
			full_index = np.zeros(self.order, dtype = int)
			ordered_index = np.zeros(self.order, dtype = int)
			full_index[axes] = s
			ordered_index[axes] = np.sort(s)
			mult = 1 if fill else len(set(permutations(s)))
			for o in other_indices:
				if o is not None:
					full_index[other_axes] = o
					ordered_index[other_axes] = o
				if tuple(ordered_index) in symm_elements:
					new_tensor[tuple(full_index)] = symm_elements[tuple(ordered_index)] / mult
		if in_place:
			self.tensor = new_tensor
			return self
		else:
			return KJTensor(new_tensor, nk = self.nk)

	def transform(self, rr3, rr5 = None, in_place = False):
		"""Transform the tensor using transformation matrix rr3.

		Arguments:
		rr3       Transformation matrix in vector representation.
		rr5       Transformation matrix in 5-dim. representation of SO(3). If
		          not provided, it is calculated from rr3.
		in_place  If True, the present instance is updated with the transformed
		          tensor. If False, return a new instance.

		Returns:
		Transformed KJTensor, either the same or a new instance (depending on
		argument in_place).
		"""
		if not isinstance(rr3, np.ndarray):
			raise TypeError("Argument rr3 must be a numpy array")
		if rr3.shape != (3, 3):
			raise ValueError("Argument rr3 must have shape (3, 3)")
		# Bug fix: this test previously re-checked rr3 instead of rr5, so an
		# invalid rr5 argument was never caught here.
		if rr5 is not None and not isinstance(rr5, np.ndarray):
			raise TypeError("Argument rr5 must be a numpy array or None")
		if rr5 is not None and rr5.shape != (5, 5):
			raise ValueError("Argument rr5 must have shape (5, 5)")
		for l in self.shape:
			if l not in [1, 3, 5]:
				raise ValueError("Transformation possible only with 1, 3, or 5 dimensional axes")
		if 5 in self.shape and rr5 is None:
			rr5 = so3_3to5(rr3)
		rr1 = np.array([[1]], dtype = complex)
		new_tensor = 1. * self.tensor  # make a copy
		for l in self.shape:
			rr = rr1 if l == 1 else rr3 if l == 3 else rr5
			new_tensor = np.tensordot(new_tensor, rr.T, axes = (0, 0))
			## Note: the multiplication is done in the following steps (einstein
			## summation convention assumed):
			##   T_ijk -> T_ijk R_ia = T'_jka
			##   T_jka -> T_jka R_jb = T'_kab
			##   T_kab -> T_kab R_kc = T'_abc
			## The contracted index of T is always at position 0, the new index
			## always appears at the end. The net result is:
			##   T_ijk -> T_ijk R_ia R_jb R_kc = T'_abc
			## This procedure extends to any order.
		if in_place:
			self.tensor = new_tensor
			return self
		else:
			return KJTensor(new_tensor, nk = self.nk)

	def is_invariant_under_transform(self, rr3, rr5 = None, acc = 1e-10):
		"""Detect whether tensor is invariant under a transformation.

		Arguments:
		rr3, rr5  Transformation matrices; see transform().
		acc       Numerical tolerance for the comparison.

		Returns:
		True if the transformed tensor equals the original within tolerance.
		"""
		new_kjt = self.transform(rr3, rr5 = rr5, in_place = False)
		return np.amax(np.abs(self.tensor - new_kjt.tensor)) < acc

	def apply_jmat(self, jmat, symmetrize_k = False):
		"""Substitute matrices for the angular momentum operators.

		Arguments:
		jmat         List of matrices. The length of the list may be 3, 5, or 8,
		             i.e., a 3-dim representation, a 5-dim representation, or
		             both, respectively. The provided representations must be
		             appropriate for the shape of the tensor, e.g., if one of
		             the components is 3-dimensional, the 3-dim representation
		             must be provided. The matrices are assumed to be square.
		symmetrize_k If True, symmetrize over the k components. For example,
		             (0, 1) and (1, 0) are summed.

		Returns:
		KTerms instance that contains a sum of matrices times a product of
		momenta.
		"""
		try:
			jmat = [np.asarray(m) for m in jmat]
		except Exception:
			# Catch specific failures of iteration/conversion only; a bare
			# 'except:' would also swallow KeyboardInterrupt/SystemExit.
			raise ValueError("Invalid value for argument jmat (not a list of matrices)")
		if len(jmat) == 3:
			jmat3, jmat5 = jmat, None
		elif len(jmat) == 5:
			jmat3, jmat5 = None, jmat
		elif len(jmat) == 8:
			jmat3, jmat5 = jmat[:3], jmat[3:]
		else:
			raise ValueError("Argument jmat must be a list of 3, 5, or 8 matrices")
		jshape = jmat[0].shape
		if any([m.shape != jshape for m in jmat]):
			raise ValueError("Argument jmat must be a list of matrices of the same shape.")
		for l in self.shape[self.nk:]:
			if l == 3 and jmat3 is None:
				raise ValueError("Tensor contains axis of dimension 3, but jmat3 (3-representation matrices) is not available")
			elif l == 5 and jmat5 is None:
				raise ValueError("Tensor contains axis of dimension 5, but jmat5 (5-representation matrices) is not available")
			elif l not in [1, 3, 5]:
				raise ValueError("Support only for 1, 3, 5 dimensional axes")

		kj_dict = {}
		all_indices = np.transpose(np.indices(self.shape)[::-1]).reshape((np.prod(self.shape), self.order))[:, ::-1]
		nk = self.nk  # shortcut
		for idx in all_indices:
			if np.abs(self.tensor[tuple(idx)]) < 1e-13:
				continue
			# With symmetrize_k, all permutations of the k indices map to the
			# same (sorted) key, so their contributions are summed.
			k_idx = tuple(np.sort(idx[:nk])) if symmetrize_k else tuple(idx[:nk])
			# Build the matrix product J_p J_q ... for the J indices.
			this_jmat = np.identity(jshape[0], dtype = complex)
			for i, l in zip(idx[nk:], self.shape[nk:]):
				if l == 3:
					this_jmat = this_jmat @ jmat3[i]
				elif l == 5:
					this_jmat = this_jmat @ jmat5[i]
				# if l == 1, do nothing
			if k_idx in kj_dict:
				kj_dict[k_idx] += self.tensor[tuple(idx)] * this_jmat
			else:
				kj_dict[k_idx] = self.tensor[tuple(idx)] * this_jmat
		return KTerms(kj_dict, kshape = self.shape[:nk], k_symmetrized = symmetrize_k)

	## Scalar multiplication
	def __mul__(self, other):
		"""Multiply all tensor components by a scalar; returns a new KJTensor."""
		# np.complexfloating replaces np.complex_, which was removed in
		# NumPy 2.0 (using it raised AttributeError at call time).
		if not isinstance(other, (int, float, complex, np.integer, np.floating, np.complexfloating)):
			raise TypeError("Multiplication must be with a scalar number")
		return KJTensor(self.tensor * other, nk = self.nk)

	## Scalar division
	def __truediv__(self, other):
		"""Divide all tensor components by a scalar; returns a new KJTensor."""
		if not isinstance(other, (int, float, complex, np.integer, np.floating, np.complexfloating)):
			raise TypeError("Division must be by a scalar number")
		return KJTensor(self.tensor / other, nk = self.nk)
+
class KTerms(dict):
	"""Container that encodes sums of matrices times products of momenta k.
	The data is a set of matrices encoding sums like sum_pq m_pq k_p k_q,
	where m_pq are matrices and k_p the momentum operators.

	Attributes:
	data           A dict of numpy matrices. The dict keys encode the indices as
	               tuples. The matrices must be of identical shape.
	korder         The number of k components. This is the length of the index
	               tuples.
	shape          Shape of the matrices.
	kshape         A tuple containing the number of indices that each k
	               component may have. This can be determined automatically, but
	               that is not recommended.
	k_symmetrized  Whether the k components (indices) are symmetrized. This can
	               be determined automatically, but that is not recommended.
	"""
	# NOTE(review): although this class inherits from dict, all items are kept
	# in the attribute 'data'; inherited dict methods (len(), keys(), ...)
	# operate on the dict instance itself, which stays empty.
	def __init__(self, data, kshape = None, k_symmetrized = None):
		if isinstance(data, dict):
			self.data = data
		else:
			raise TypeError
		if len(self.data) == 0:
			# Empty data: order and shape cannot be inferred from the items;
			# korder is corrected from kshape further below.
			self.korder = 0
			self.shape = ()
		else:
			# Derive the k order (length of index tuples) and the common matrix
			# shape; both must be uniform over all items.
			korders = [len(i) for i in self.data]
			self.korder = korders[0]
			if any([o != self.korder for o in korders]):
				raise ValueError("Non-uniform indexing")
			shapes = [self.data[i].shape for i in self.data]
			self.shape = shapes[0]
			if any([s != self.shape for s in shapes]):
				raise ValueError("Non-uniform data")
		if isinstance(kshape, tuple):
			self.kshape = kshape
		elif kshape is None:
			# Guess kshape from the largest index value per component:
			# max index <= 2 implies dimension 3, <= 4 implies dimension 5.
			# NOTE(review): if data is empty and kshape is None, np.amax over
			# an empty list raises here; pass kshape explicitly in that case.
			max_indices = np.amax(list(self.data.keys()), axis = 0)
			self.kshape = tuple([3 if maxidx <= 2 else 5 if maxidx <= 4 else 0 for maxidx in max_indices])
			if 0 in self.kshape:
				raise ValueError("Unable to determine kshape")
		else:
			raise TypeError("Argument kshape must be a tuple or None")
		if len(self.data) == 0:
			self.korder = len(self.kshape)
		elif len(self.kshape) != self.korder:
			raise ValueError("Properties kshape and korder are inconsistent")
		if k_symmetrized is None:
			# Determine automatically whether there is symmetrization over the k indices (not recommended)
			if self.korder <= 1:
				self.k_symmetrized = True
			else:
				# The data counts as symmetrized if every non-sorted index has
				# an equal matrix stored at the corresponding sorted index.
				self.k_symmetrized = True
				for idx in self.data:
					sorted_idx = tuple(sorted(idx))
					if idx != sorted_idx:
						if sorted_idx not in self.data:
							self.k_symmetrized = False
							break
						elif np.amax(np.abs(self.data[idx] - self.data[sorted_idx])) > 1e-13:
							self.k_symmetrized = False
							break
		else:
			self.k_symmetrized = k_symmetrized

	def __getitem__(self, *i):
		"""Smart method for getting an element (matrix) from the data.
		The input may be numeric (tuple of integers), a string of numbers (e.g.,
		'00') or a string of the letters x, y, z (e.g., 'xx').

		Returns the stored matrix for the index, a zero matrix of the proper
		shape if the index is absent, or the scalar 0 if korder == 0.
		"""
		# Subscription always passes a single argument, so i is a 1-tuple here:
		# either (tuple,), (str,), or a plain tuple of integers for t[0, 1].
		if isinstance(i, tuple) and len(i) == 1 and isinstance(i[0], tuple):
			index = i[0]
		elif isinstance(i, tuple) and len(i) == 1 and isinstance(i[0], str):
			# Map letters to coordinate indices: x -> 0, y -> 1, z -> 2;
			# digit characters are used as numeric indices directly.
			index = tuple([0 if c == 'x' else 1 if c == 'y' else 2 if c == 'z' else int(c) for c in i[0]])
		elif isinstance(i, tuple) and all([isinstance(ii, (int, np.integer)) for ii in i]):
			index = i
		else:
			raise TypeError
		if len(index) != self.korder:
			raise KeyError("Index has invalid number of components")
		if index in self.data:
			return self.data[index]
		elif self.korder == 0:
			return 0
		else:
			return np.zeros(self.shape, dtype = complex)

	def __str__(self):
		"""String representation: Newline-separated string of the items in data."""
		return "\n".join([str(i) + "\n" + str(self.data[i]) for i in sorted(self.data)])

	def chop(self, acc = 1e-13):
		"""Chop almost-zero values from matrices.

		Arguments:
		acc   Threshold below which absolute values are set to zero.

		Returns:
		The present KTerms instance (modified in place).
		"""
		for i in self.data:
			self.data[i][np.abs(self.data[i]) < acc] = 0.0
		return self

	def __iter__(self):
		"""Iterator over data items (the index tuples, like dict iteration)."""
		return iter(self.data)

	def __eq__(self, other):
		# Equality holds if all indices present in self are present in other
		# with matrices equal within tolerance 1e-13.
		# NOTE(review): indices present only in other.data are not checked
		# here, so this comparison is not symmetric -- TODO confirm intended.
		if not isinstance(other, KTerms):
			raise TypeError("Comparison == must be between two KTerms instances.")
		if self.shape != other.shape:
			return False
		for index in self.data:
			if index not in other.data:
				return False
			if np.amax(np.abs(self.data[index] - other.data[index])) >= 1e-13:
				return False
		return True

	def axial_part(self, in_place = False):
		"""Calculate the axial part of the k term defined by the KTerms instance.

		Arguments:
		in_place  If True, replace the data of the present instance; if False,
		          construct and return a new KTerms instance.

		Returns:
		The present instance with the axial part (in_place = True) or a new
		instance (in_place = False).
		"""
		# axdata is a list indexed by Delta J (change of the Jz quantum
		# number); Python's negative list indices are used for negative
		# Delta J, e.g., axdata[-1] holds the Delta J = -1 terms.
		if self.kshape == (3,):
			axdata = [None] * 3
			axdata[0] = {(2,): self[2]}  # m(z) kz
			axdata[1] = {(0,): 0.5 * self[0] - 0.5j * self[1], (1,): 0.5j * self[0] + 0.5 * self[1]}  # (1/2) [m(x) - i m(y)] kx + (i/2) [m(x) - i m(y)] ky
			axdata[-1] = {(0,): 0.5 * self[0] + 0.5j * self[1], (1,): -0.5j * self[0] + 0.5 * self[1]}  # (1/2) [m(x) + i m(y)] kx - (i/2) [m(x) + i m(y)] ky
		elif self.kshape == (3, 3):
			axdata = [None] * 5
			axdata[0] = {(2, 2): self[2, 2], (0, 0): 0.5 * (self[0, 0] + self[1, 1]), (1, 1): 0.5 * (self[0, 0] + self[1, 1])}  # m(zz) kz^2 + (1/2) [m(xx)+m(yy)] (kx^2 + ky^2)
			if not self.k_symmetrized:  # Antisymmetric term: (1/2) [m(xy) - m(yx)] (kx ky - ky kx)
				axdata[0][(0, 1)] = 0.5 * (self[0, 1] - self[1, 0])
				axdata[0][(1, 0)] = -0.5 * (self[0, 1] - self[1, 0])
			axdata[1] = {(0, 2): 0.5 * self[0, 2] - 0.5j * self[1, 2], (1, 2): 0.5j * self[0, 2] + 0.5 * self[1, 2],
			             (2, 0): 0.5 * self[2, 0] - 0.5j * self[2, 1], (2, 1): 0.5j * self[2, 0] + 0.5 * self[2, 1]}  # (1/2) [m(xz) - i m(yz)] kx kz + (i/2) [m(xz) - i m(yz)] ky kz + (1st <-> 2nd)
			axdata[-1] = {(0, 2): 0.5 * self[0, 2] + 0.5j * self[1, 2], (1, 2): -0.5j * self[0, 2] + 0.5 * self[1, 2],
			              (2, 0): 0.5 * self[2, 0] + 0.5j * self[2, 1], (2, 1): -0.5j * self[2, 0] + 0.5 * self[2, 1]}  # (1/2) [m(xz) + i m(yz)] kx kz - (i/2) [m(xz) + i m(yz)] ky kz + (1st <-> 2nd)
			mat_pp = 0.25 * (self[0, 0] - self[1, 1]) - 0.25j * (self[0, 1] + self[1, 0])  # m(++) = (1/4) [m(xx) - m(yy)] + (1/4i) [m(xy) + m(yx)]
			mat_mm = 0.25 * (self[0, 0] - self[1, 1]) + 0.25j * (self[0, 1] + self[1, 0])  # m(--) = (1/4) [m(xx) - m(yy)] - (1/4i) [m(xy) + m(yx)]
			if self.k_symmetrized:
				axdata[2] = {(0, 0): mat_pp, (1, 1): -mat_pp, (0, 1): 2j * mat_pp}  # (kx^2 - ky^2 + 2i kx ky) m(++)
				axdata[-2] = {(0, 0): mat_mm, (1, 1): -mat_mm, (0, 1): -2j * mat_mm}  # (kx^2 - ky^2 - 2i kx ky) m(--)
			else:
				axdata[2] = {(0, 0): mat_pp, (1, 1): -mat_pp, (0, 1): 1j * mat_pp, (1, 0): 1j * mat_pp}  # (kx^2 - ky^2 + i kx ky + i ky kx) m(++)
				axdata[-2] = {(0, 0): mat_mm, (1, 1): -mat_mm, (0, 1): -1j * mat_mm, (1, 0): -1j * mat_mm}  # (kx^2 - ky^2 - i kx ky - i ky kx) m(--)
		elif self.kshape == (5,):
			axdata = [None] * 5
			# (K0, K1, K2, K3, K4) = (2 ky kz, 2 kx kz, 2 kx ky, kx^2 - ky^2, (2 kz^2 - kx^2 - ky^2) / sqrt(3))
			axdata[0] = {(4,): self[4]}  # m(4) K4
			axdata[1] = {(1,): 0.5 * self[1] - 0.5j * self[0], (0,): 0.5j * self[1] + 0.5 * self[0]}  # (1/2) (m1 - i m0) (K1 + i K0)
			axdata[-1] = {(1,): 0.5 * self[1] + 0.5j * self[0], (0,): -0.5j * self[1] + 0.5 * self[0]}  # (1/2) (m1 + i m0) (K1 - i K0)
			axdata[2] = {(2,): 0.5 * self[2] + 0.5j * self[3], (3,): -0.5j * self[2] + 0.5 * self[3]}  # (1/2) (m2 + i m3) (K2 - i K3)
			axdata[-2] = {(2,): 0.5 * self[2] - 0.5j * self[3], (3,): 0.5j * self[2] + 0.5 * self[3]}  # (1/2) (m2 - i m3) (K2 + i K3)
		else:
			sys.stderr.write("ERROR (KTerms.axial_part): Axial approximation not implemented for kshape = %s.\n" % self.kshape)
			return self

		ix, iy = np.indices(self.shape)
		# Calculate matrix of 'Delta J', i.e. differences in the Jz eigenvalue for row and column states.
		if self.shape[0] == self.shape[1]:  # square matrices
			idx_delta = 0
		elif self.shape == (2, 4):
			idx_delta = 1
		elif self.shape == (4, 2):
			idx_delta = - 1
		else:
			raise NotImplementedError
		mat_deltaj = ix - iy + idx_delta

		# Keep, for each k index, only the matrix elements whose Delta J value
		# matches the Delta J (j) of the axdata entry they came from.
		newmat = {}
		jmax = (len(axdata) - 1) // 2
		zero = np.zeros(self.shape, dtype = complex)
		for j in range(-jmax, jmax + 1):
			for i in axdata[j]:
				# Calculate the terms of the matrix from axdata corresponding to Delta J (value j)
				mat = np.where(mat_deltaj == j, np.array(axdata[j][i]), zero)
				if np.amax(np.abs(mat)) < 1e-13:
					pass
				elif i in newmat:
					newmat[i] += mat
				else:
					newmat[i] = mat
		# for i in list(set(list(self.data.keys()) + list(newmat.keys()))):
		# 	if i in self.data:
		# 		print (self.data[i], i, 'OLD')
		# 	else:
		# 		print ("== ZERO ==", i, 'OLD')
		# 	if i in newmat:
		# 		print (newmat[i], i, 'NEW')
		# 	else:
		# 		print ("== ZERO ==", i, 'NEW')
		# print ()
		if in_place:
			self.data = newmat
			return self
		else:
			return KTerms(newmat, kshape = self.kshape, k_symmetrized = self.k_symmetrized)

	def is_axial(self):
		"""Test whether the KTerms instance defines an axially symmetric k term. This is
		done by testing equality between the full term and its axial part.
		"""
		# Check both directions, so that indices present on only one side are
		# also compared (against an implicit zero matrix).
		axial = self.axial_part(in_place = False)
		for idx in self.data:
			if np.amax(np.abs(self[idx] - axial[idx])) > 1e-13:
				return False
		for idx in axial:
			if np.amax(np.abs(self[idx] - axial[idx])) > 1e-13:
				return False
		return True
+
class KTermsDict(object):
	"""Container for KTerms instances, useful to define a Hamiltonian with many terms.

	Attributes:
	data   dict of KTerms instances. The dict keys may be anything, but strings
	       are preferred.
	"""
	def __init__(self, data = None):
		"""Initialize from an existing dict of KTerms instances, or empty if None."""
		if data is None:
			self.data = {}
			return
		if not isinstance(data, dict) or not all(isinstance(data[key], KTerms) for key in data):
			raise TypeError("Argument data must be a dict of KTerms instances or None")
		self.data = data

	def __getitem__(self, x, *y):
		"""Get item data[x] (y not present) or data[x][y], where y are the indices for KTerms instance data[x]."""
		terms = self.data[x]
		return terms if len(y) == 0 else terms.__getitem__(*y)

	def __setitem__(self, x, val):
		"""Add or change an item; the value must be a KTerms instance."""
		if not isinstance(val, KTerms):
			raise TypeError("Value must be a KTerms instance")
		self.data[x] = val

	def __iter__(self):
		"""Iterator over the keys of data (one per KTerms instance)."""
		return iter(self.data)

	def axial_approximation(self, in_place = False, exclude_strain = True):
		"""Apply axial approximation, as defined above, on all KTerms instances except
		strain terms. [Strain terms: see Pfeuffer-Jeschke, PhD thesis, App. C.1.] The
		exclusion of strain terms can be avoided by setting exclude_strain = False.
		"""
		if in_place:
			for key in self.data:
				if not (exclude_strain and 'strain' in key):
					self.data[key].axial_part(in_place = True)
			return self
		newdata = {}
		for key, terms in self.data.items():
			if exclude_strain and 'strain' in key:
				newdata[key] = terms
			else:
				newdata[key] = terms.axial_part(in_place = False)
		return KTermsDict(newdata)
+
+
_s3 = np.sqrt(3.)
def so3_3to5(rr):
	"""Return 5-dim representation matrix of SO(3) based on the vector representation (3-dim).
	The 5-dimensional basis is 2 y z, 2 x z, 2 x y, x^2-y^2, (2z^2-x^2-y^2) / sqrt(3).
	"""
	if not isinstance(rr, np.ndarray) or rr.shape != (3, 3):
		raise TypeError("Argument rr must be a 3x3 numpy array")
	# TODO: Check orthogonality
	# Index pairs defining the products yz, zx, xy (first three basis states)
	pairs = [(1, 2), (2, 0), (0, 1)]
	result = np.zeros((5, 5), dtype = float)
	for row, (a, b) in enumerate(pairs):
		for col, (c, d) in enumerate(pairs):
			result[row, col] = rr[a, c] * rr[b, d] + rr[a, d] * rr[b, c]
		result[row, 3] = rr[a, 0] * rr[b, 0] - rr[a, 1] * rr[b, 1]
		result[row, 4] = _s3 * rr[a, 2] * rr[b, 2]
	for col, (c, d) in enumerate(pairs):
		result[3, col] = rr[0, c] * rr[0, d] - rr[1, c] * rr[1, d]
		result[4, col] = _s3 * rr[2, c] * rr[2, d]
	result[3, 3] = 0.5 * (rr[0, 0]**2 - rr[1, 0]**2) - 0.5 * (rr[0, 1]**2 - rr[1, 1]**2)
	result[3, 4] = 0.5 * _s3 * (rr[0, 2]**2 - rr[1, 2]**2)
	result[4, 3] = 0.5 * _s3 * (rr[2, 0]**2 - rr[2, 1]**2)
	result[4, 4] = 1.5 * rr[2, 2]**2 - 0.5

	# Chop numerical noise
	result[np.abs(result) < 1e-10] = 0.0
	return result
+
def lattice_reg_transform(k, cc, tfm, quadratic = False):
	"""Lattice regularized vector based on ordinary vector k in sample coordinates.

	Arguments:
	k         Momentum value; a list or an array of length 1, 2, or 3, or a
	          number.
	cc        Lattice constant
	tfm       The 3x3 transformation matrix that encodes the transformation
	          between lattice and sample coordinates.
	quadratic If True, return quadratic components, else linear components.

	Returns:
	Lattice regularized value of either
	  kx, ky, kz (quadratic = False); or
	  kx^2, ky^2, kz^2, ky kz, kx kz, kx ky (quadratic = True)
	If k has less than 3 components, the undefined components are excluded from
	the return value."""
	scalar_input = isinstance(k, (float, int, np.floating, np.integer))
	kvec = np.array([k]) if scalar_input else np.asarray(k)
	if kvec.ndim != 1 or kvec.shape[0] not in [1, 2, 3]:
		raise ValueError("Argument k must be a tuple, list, etc. of 1, 2, or 3 components.")
	if not isinstance(tfm, np.ndarray) or tfm.shape != (3, 3):
		raise TypeError("Argument tfm must be a 3x3 numpy array")
	dim = kvec.shape[0]
	if dim < 3:
		# Pad with zeros so the transformation always acts on 3 components
		kvec = np.concatenate((kvec, np.zeros(3 - dim)))
	mat = np.asarray(tfm)
	inv_mat = np.linalg.inv(mat)

	# Apply inverse transformation to lattice coordinate frame (momentum space)
	k_latt = np.dot(inv_mat, kvec)
	# Do linear (sine) lattice regularization
	k_latt_sin = np.sin(cc * k_latt) / cc
	# Transform back to sample coordinates
	k_sin = np.dot(mat, k_latt_sin)
	k_sin[np.abs(k_sin) < 1e-13] = 0.0
	if not quadratic:
		return k_sin[0] if scalar_input else tuple(k_sin[:dim])

	# Quadratic (cosine) regularization on the diagonal; products of the
	# regularized sines on the off-diagonal
	k_latt_cos = (1. - np.cos(cc * k_latt)) * 2. / cc**2
	cos_mat_latt = np.zeros((3, 3), dtype = float)
	for i in range(0, 3):
		for j in range(0, 3):
			cos_mat_latt[i, j] = k_latt_cos[i] if i == j else k_latt_sin[i] * k_latt_sin[j]
	cos_mat = np.dot(np.dot(mat, cos_mat_latt), mat.T)
	cos_mat[np.abs(cos_mat) < 1e-13] = 0.0
	# Collect diagonal terms first, then the off-diagonal products that are
	# defined for the given input dimension
	components = [cos_mat[i, i] for i in range(0, dim)]
	if dim == 3:
		components += [cos_mat[1, 2], cos_mat[0, 2]]
	if dim >= 2:
		components += [cos_mat[0, 1]]
	return components[0] if scalar_input else tuple(components)
diff --git a/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transformable.py b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transformable.py
new file mode 100644
index 0000000000000000000000000000000000000000..eca0f5736d094bf9389d21a61fbc65635613cefd
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hamiltonian/transformable.py
@@ -0,0 +1,133 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .. import spinmat as spin
+from .transform import KJTensor, KTermsDict
+
+def h_kterms(params, axial = True, verbose = False):
+	"""Transformable Hamiltonian.
+
+	This function defines all terms of the k.p Hamiltonian in terms of KJTensor
+	instances, which are then transformed (transformation data contained in
+	params) and turned into KTerms instances by applying the angular momentum
+	matrices defined in spinmat.py.
+
+	Arguments:
+	params    SysParams instance. This function uses the transformation matrix
+	          (params.lattice_trans) only.
+	axial     Whether to apply the axial approximation.
+	verbose   Whether to show diagnostic output.
+
+	Returns:
+	A KTermsDict instance that contains all KTerms instances described above.
+	"""
+	# Define Hamiltonian tensors (KJTensor instances)
+	mu_tens = KJTensor({'xxxx':2, 'xxyy':-1, 'xxzz':-1, 'yyyy':2, 'yyxx':-1, 'yyzz':-1, 'zzzz': 2, 'zzxx':-1, 'zzyy':-1, 'yzyz':-1.5, 'xzxz':-1.5, 'yxyx':-1.5}, nk = 2).symmetrize((0,1), fill = True).symmetrize((2,3), fill = True) * 1 / 3  # gamma2 - gamma3 coefficient
+	gg_tens = KJTensor({'xxxx':2, 'xxyy':-1, 'xxzz':-1, 'yyyy':2, 'yyxx':-1, 'yyzz':-1, 'zzzz': 2, 'zzxx':-1, 'zzyy':-1, 'yzyz':1.5, 'xzxz':1.5, 'yxyx':1.5}, nk = 2).symmetrize((0,1), fill = True).symmetrize((2,3), fill = True) * 1 / 3  # gamma2 + gamma3 coefficient
+	kappa_tens = KJTensor({'xyz': 1, 'yzx': 1, 'zxy': 1, 'xzy': -1, 'yxz': -1, 'zyx': -1}, nk = 2)
+	g3_tens = KJTensor({'yzyz':1, 'xzxz':1, 'yxyx':1}, nk = 2).symmetrize((0,1), fill = True).symmetrize((2,3), fill = True)  # gamma3 coefficient only (e.g., for magnetic fields)
+	magn_tens = KJTensor({'yzyz':1, 'zxzx':1, 'xyxy':1, 'zyyz':-1, 'xzzx':-1, 'yxxy':-1, 'yzzy':1, 'zxxz':1, 'xyyx':1, 'zyzy':-1, 'xzxz':-1, 'yxyx':-1}, nk = 2)
+	# magn_tens = KJTensor({'xyz':1, 'yzx':1, 'zxy':1, 'xyz':1, 'yzx':1, 'zxy':1}, nk = 1)
+	mu_tens5 = KJTensor({'00': -1, '11': -1, '22': -1, '33': 1, '44': 1}, nk = 1, shape = (5, 5)) * 1 / 2
+	gg_tens5 = KJTensor({'00':  1, '11':  1, '22':  1, '33': 1, '44': 1}, nk = 1, shape = (5, 5)) * 1 / 2
+	# BIA Terms
+	c88_tens = KJTensor({'xxyy':1, 'xyyx':1, 'xxzz':-1, 'xzzx':-1, 'yyzz':1, 'yzzy':1, 'yyxx':-1, 'yxxy':-1, 'zzxx':1, 'zxxz':1, 'zzyy':-1, 'zyyz':-1}, nk = 1)  # c coefficient (H88)
+	b8p_tens = KJTensor({'xyz':1, 'xzy':1, 'yxz':1, 'yzx':1, 'zxy':1, 'zyx':1}, nk = 2)  # B8+ coefficient (H68)
+	b8m_tens = KJTensor({'34':-1, '43':1}, nk = 1, shape = (5, 5))  # B8- coefficient (H68)
+	b7_tens = KJTensor({'xyz':1, 'xzy':1, 'yxz':1, 'yzx':1, 'zxy': 1, 'zyx':1}, nk = 2)  # B7 coefficient (H67)
+	c87_tens = KJTensor({'x0':1, 'y1':1, 'z2':1}, shape = (3, 5), nk = 1)  # B7 coefficient (H67)
+	# Strain terms
+	strain_b_tens = KJTensor({'xxxx':2, 'xxyy':-1, 'xxzz':-1, 'yyyy':2, 'yyxx':-1, 'yyzz':-1, 'zzzz': 2, 'zzxx':-1, 'zzyy':-1}, nk = 2).symmetrize((0,1), fill = True).symmetrize((2,3), fill = True) * 1 / 3  # b coefficient (like gamma2)
+	strain_d_tens = KJTensor({'xyxy':1, 'yzyz':1, 'zxzx':1}, nk = 2).symmetrize((0,1), fill = True).symmetrize((2,3), fill = True) * 1 / 3  # d coefficient (like gamma3)
+
+	# Transform
+	if params.lattice_trans is not None:
+		if verbose:
+			print()
+			print("Invariance of transformable Hamiltonian under lattice transformation:")
+		all_tens = [mu_tens, gg_tens, kappa_tens, g3_tens, magn_tens, mu_tens5, gg_tens5, c88_tens, b8p_tens, b8m_tens, b7_tens, c87_tens, strain_b_tens, strain_d_tens]
+		tens_names = ['mu', 'gammabar', 'kappa', 'gamma3', 'magn', 'mu5', 'gammabar5', 'c88', 'b8+', 'b8-', 'b7', 'c87', 'b_s', 'd_s']
+		for tens, tens_name in zip(all_tens, tens_names):
+			invariant = tens.is_invariant_under_transform(params.lattice_trans)
+			if verbose:
+				print("%-10s: %s" % (tens_name, invariant))
+			if not invariant:
+				tens.transform(params.lattice_trans, in_place = True).chop()
+				# NOTE: Do not transform if the tensor is invariant
+
+		# gg_tens, gg_tens5, and kappa_tens are spherically invariant
+
+	# Create empty KTermsDict instance
+	kterms = KTermsDict()
+
+	# Define KTerms
+	kterms['mu88'] = mu_tens.apply_jmat(spin.j3basis, symmetrize_k = True).chop()
+	kterms['gg88'] = gg_tens.apply_jmat(spin.j3basis, symmetrize_k = True).chop()
+	kterms['kappa88'] = kappa_tens.apply_jmat(spin.j3basis, symmetrize_k = False).chop()  # No symmetrization, because term is antisymmetric
+	kterms['g3_88'] = g3_tens.apply_jmat(spin.j3basis, symmetrize_k = True).chop()  # TODO: Check whether necessary
+	kterms['magn'] = magn_tens.apply_jmat(spin.j3basis, symmetrize_k = False).chop()  # TODO: Check whether necessary
+	kterms['mu78'] = mu_tens5.apply_jmat(spin.t5basis, symmetrize_k = True).chop()
+	kterms['gg78'] = gg_tens5.apply_jmat(spin.t5basis, symmetrize_k = True).chop()
+	kterms['kappa78'] = kappa_tens.apply_jmat(spin.t3basis, symmetrize_k = False).chop()  # No symmetrization, because term is antisymmetric
+	kterms['mu87'] = mu_tens5.apply_jmat(spin.u5basis, symmetrize_k = True).chop()
+	kterms['gg87'] = gg_tens5.apply_jmat(spin.u5basis, symmetrize_k = True).chop()
+	kterms['kappa87'] = kappa_tens.apply_jmat(spin.u3basis, symmetrize_k = False).chop()  # No symmetrization, because term is antisymmetric
+
+	kterms['bia_c88'] = c88_tens.apply_jmat(spin.j3basis).chop()
+	kterms['bia_b8p'] = b8p_tens.apply_jmat(spin.t3basis, symmetrize_k = True).chop()
+	kterms['bia_b8m'] = b8m_tens.apply_jmat(spin.t5basis).chop()
+	kterms['bia_b7'] = b7_tens.apply_jmat(spin.sigma3basis, symmetrize_k = True).chop()
+	kterms['bia_c87'] = c87_tens.apply_jmat(spin.t5basis).chop()
+	# hermitian conjugates
+	kterms['bia_b8pH'] = b8p_tens.apply_jmat([m.conjugate().transpose() for m in spin.t3basis], symmetrize_k = True).chop()
+	kterms['bia_b8mH'] = b8m_tens.apply_jmat([m.conjugate().transpose() for m in spin.t5basis]).chop()
+	kterms['bia_c87H'] = c87_tens.apply_jmat([m.conjugate().transpose() for m in spin.t5basis]).chop()
+
+	kterms['strain_b'] = strain_b_tens.apply_jmat(spin.j3basis, symmetrize_k = True).chop()
+	kterms['strain_d'] = strain_d_tens.apply_jmat(spin.j3basis, symmetrize_k = True).chop()
+
+	if axial:  # axial = True ==> discard nonaxial terms (except strain terms)
+		if verbose:
+			print()
+			print("Axial symmetry of terms of transformable Hamiltonian:")
+			for term_name in sorted(kterms):
+				print("%-10s: %s" % (term_name, kterms[term_name].is_axial()))
+		kterms.axial_approximation(in_place = True)
+	return kterms
diff --git a/kdotpy-v1.0.0/src/kdotpy/hdf5o.py b/kdotpy-v1.0.0/src/kdotpy/hdf5o.py
new file mode 100644
index 0000000000000000000000000000000000000000..602b534d24857aab5090515c78e4582e09880bda
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/hdf5o.py
@@ -0,0 +1,102 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import time
+import numpy as np
+try:
+	import h5py
+	HAS_H5PY = True
+except:
+	HAS_H5PY = False
+
+def create(filename):
+	"""Create/initialize a HDF5 file"""
+	if not HAS_H5PY:
+		sys.stderr.write("ERROR (hdf5o.create): Python package 'h5py' required but not installed.\n")
+		return False
+	with h5py.File(filename, 'w') as f:
+		grp = f.create_group('info')
+		grp.create_dataset('generator', data=np.char.encode('kdotpy', 'utf-8'))
+	return True
+
+def append(filename, groupname = None, data = {}, attr = {}):
+	"""Append data to an existing HDF5 file.
+
+	If it does not exist, an exception is raised."""
+	if not HAS_H5PY:
+		sys.stderr.write("ERROR (hdf5o.append): Python package 'h5py' required but not installed.\n")
+		return False
+	with h5py.File(filename, 'r+') as f:
+		if isinstance(groupname, str):
+			try:
+				grp = f.create_group(groupname)
+			except ValueError as ex:
+				if "already exists" in str(ex).lower():
+					sys.stderr.write("ERROR (hdf5o.append): Data point with this label already exists.\n")
+					return False
+				else:
+					raise
+		else:
+			raise ValueError("Argument groupname must be a string")
+		if isinstance(data, dict):
+			for x in data:
+				grp.create_dataset(x, data = data[x])
+		else:
+			raise TypeError("Argument data must be a dict")
+		if isinstance(attr, dict):
+			for x in attr:
+				grp.attrs[x] = attr[x]
+		else:
+			raise TypeError("Argument attr must be a dict")
+	return True
+
+def append_retry(*args, **kwds):
+	"""Wrapper around append() to retry if multiple processes are trying to use it simultaneously"""
+	max_tries = 300
+	time_interval = 0.2
+	for i in range(0, max_tries):
+		try:
+			return append(*args, **kwds)
+		except OSError as ex:
+			time.sleep(time_interval)
+			if i == max_tries - 1:
+				raise
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/integrate.py b/kdotpy-v1.0.0/src/kdotpy/integrate.py
new file mode 100644
index 0000000000000000000000000000000000000000..85f76a5cbdabc1dc31e61e7643378cd97a93d326
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/integrate.py
@@ -0,0 +1,135 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+def trapezoidal_integration(arr):
+    """Trapezoidal integration
+    Yields a primitive function of the same length as the input array. The
+    return value is the sum over all array elements. The total sum is conserved,
+    symmetry is not.
+
+    Argument:
+    arr   Numpy array
+
+    Return
+    prim  Numpy array. A primitive function of input array arr.
+    """
+    zero = np.array([0.0])
+
+    arr_r = np.concatenate((arr[1:], zero))  # f(a_{j+1})
+    arr_l = np.concatenate((zero, arr[:-1]))  # f(a_{j-1})
+
+    l_trap = 0.375 * arr + 0.125 * arr_l
+    r_trap = 0.375 * arr + 0.125 * arr_r
+    l_trap[-1] = l_trap[-1] + 0.125 * arr[-1]
+    r_trap[0] = r_trap[0] + 0.125 * arr[0]
+
+    return np.cumsum(l_trap) + np.cumsum(r_trap)
+
+
+def basic_integration(arr):
+    """Basic integration
+    Yields a primitive function of the same length as the input array. The
+    return value is (B[i] + B[i+1]) / 2, where B is the cumulative sum of the
+    input array.
+
+    Argument:
+    arr   Numpy array
+
+    Return
+    prim  Numpy array. A primitive function of input array arr.
+    """
+    zero = np.array([0.0])
+
+    arr_ext = np.concatenate((zero, arr))
+    arr_cs = np.cumsum(arr_ext)
+
+    return 0.5 * (arr_cs[1:] + arr_cs[:-1])
+
+
+def integrate_arr(arr):
+    """Alias for either trapezoidal_integration or basic_integration"""
+    return basic_integration(arr)
+
+
+def special_diff(arr, y0=None, i0=0, automatic=False):
+    """Special derivative.
+    Designed to be the inverse of basic_integration(). That function takes the
+    (B[i] + B[i+1]) / 2, essentially a convolution. Thus, here we need to invert
+    that step by doing a deconvolution. This requires the initial value as extra
+    input, otherwise we get an alternating error +q, -q, +q, -q, ... . This
+    function corrects this value either from explicit input or automatically, by
+    assumption that the edges are linear.
+
+    Arguments:
+    arr        Numpy array of 1 dim.
+    y0         Float or None. If given, the initial value.
+    i0         Integer. Where the initial value should be applied.
+    automatic  True or False. If True, test if function is linear at the left
+               and/or right edge and apply due correction. If no linearity is
+               detected, no correction is applied.
+
+    Returns:
+    diff_arr   Numpy array. The derivative of arr.
+    """
+
+    if not isinstance(arr, np.ndarray):
+        raise TypeError("Argument arr must be a numpy array")
+    if arr.ndim != 1:
+        raise ValueError("Argument arr must be of dimension 1")
+    # Do deconvolution of (B[i] + B[i+1]) / 2
+    # Invert the two-point average: from d[i] = (y[i] + y[i+1]) / 2, with d the
+    # discrete difference of arr, we get y[i+1] = 2 * d[i] - y[i], seeded 0.0.
+    y = [0.0]
+    for x1 in np.diff(arr):
+        y.append(2 * x1 - y[-1])
+    # Determine and apply correction term
+    # The seed 0.0 is generally wrong by some amount q; the recursion above
+    # propagates that error with alternating sign, hence the parity array m.
+    m = (-1) ** np.mod(np.arange(len(arr)), 2)
+    if automatic and len(arr) > 3:
+        # NOTE(review): when automatic is True, an explicitly given y0 is
+        # ignored — confirm this precedence is intended.
+        # Estimate q from the second difference at each edge; for a linear
+        # edge the true second difference vanishes, so what remains is the
+        # alternating seed error (amplitude 4q in y[0], y[1], y[2]).
+        ql = (y[2] - 2 * y[1] + y[0]) / 4
+        qr = (y[-3] - 2 * y[-2] + y[-1]) / 4
+        # Apply a correction only where the edge is linear within a relative
+        # tolerance of 1e-12; average both estimates if both edges qualify.
+        condl = (abs(y[2] - y[0]) < 1e-12 * abs(y[1] - y[0]))
+        condr = (abs(y[-3] - y[-1]) < 1e-12 * abs(y[-2] - y[-1]))
+        q = (ql + qr) / 2 if condl and condr else ql if condl else qr if condr else 0
+    elif y0 is not None:
+        i0 %= len(arr)  # index modulo array length
+        # Sign factor m[i0] maps the local error at i0 back to the seed error q
+        q = (y[i0] - y0) * m[i0]
+    else:
+        q = 0
+    return np.array(y) - q * m
diff --git a/kdotpy-v1.0.0/src/kdotpy/intervaltools.py b/kdotpy-v1.0.0/src/kdotpy/intervaltools.py
new file mode 100644
index 0000000000000000000000000000000000000000..b019e0d8f78d7e31d075539e560e42a717b62917
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/intervaltools.py
@@ -0,0 +1,118 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+def union(a, b, c, d):
+	"""Calculate union of two simple real intervals [a, b] and [c, d]"""
+	if a > b:
+		a, b = b, a
+	if c > d:
+		c, d = d, c
+	if a <= b < c <= d:
+		return [(a, b), (c, d)]
+	if c <= d < a <= b:
+		return [(c, d), (a, b)]
+	return [(min(a, c), max(b, d))]
+
+def intersection(a, b, c, d):
+	"""Calculate intersection of two simple real intervals [a, b] and [c, d]"""
+	if a > b:
+		a, b = b, a
+	if c > d:
+		c, d = d, c
+	if a <= b < c <= d:
+		return []
+	if c <= d < a <= b:
+		return []
+	return [(max(a, c), min(b, d))]
+
+def normalize(intervals):
+	"""Normalize a union of real intervals
+
+	Argument:
+	intervals  A list of 2-tuples. Each 2-tuple (a, b) with real values a, b
+	           represents a simple interval [a, b].
+
+	Returns:
+	result_iv  A list of 2-tuples, representing a union of simple intervals.
+	           This result is simplified as much as possible, i.e., the simple
+	           intervals are disjoint and in increasing order.
+	"""
+	if len(intervals) == 0:
+		return []
+	if len(intervals) == 1:
+		return intervals
+	sorted_iv = [(min(iv), max(iv)) for iv in sorted(intervals)]
+	result_iv = [sorted_iv[0]]
+	for iv in sorted_iv[1:]:
+		prev_iv = result_iv[-1]
+		i = intersection(*prev_iv, *iv)
+		if len(i) == 0:
+			result_iv.append(iv)
+		else:
+			result_iv[-1] = union(*prev_iv, *iv)[0]
+	return result_iv
+
+def from_eivals(eival, target = None):
+	"""Get interval from set of eigenvalues; optionally take into account target energy.
+
+	Example:
+	Suppose one finds eigenvalues between -1.5 and 8.3 for target energy 4.0. If
+	target is not set, then simply return (-1.5, 8.3). If target is set to 4.0,
+	then the maximum distance is max(|8.3 - 4.0|, |-1.5 - 4.0|) = 5.5; then
+	return (4.0 - 5.5, 4.0 + 5.5) = (-1.5, 9.5).
+
+	Arguments:
+	eival     List or array. The eigenvalues.
+	target    Numeric or None. If numeric, return the interval in which the
+	          shift-and-invert method has scanned.
+
+	Returns:
+	interval  2-tuple.
+	"""
+	if target is None:
+		# strict: the smallest and largest value
+		return (min(eival), max(eival))
+	else:
+		# relaxed: interval less than max distance away from target value
+		max_dist = np.amax(np.abs(eival - target))
+		return (target - max_dist, target + max_dist)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-1d.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-1d.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb107162ce4f7be7a9eba893550334494369cd7d
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-1d.py
@@ -0,0 +1,268 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+# Limit OpenMP to one thread per process. This must be set before numpy is
+# imported, because numpy's threading backends read it at import time.
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+import os.path
+import numpy as np
+
+from .config import initialize_config, get_config_int, get_config_bool
+from .materials import initialize_materials
+from . import cmdargs
+from .momentum import VectorGrid
+from .models import ModelMomentum1D
+from .diagonalization import DiagData, DiagDataPoint
+from .physparams import print_length_scales
+
+from .observables import all_observables, get_all_obsids
+from .parallel import set_job_monitor
+from .diagonalization import diagsolver as dsolv
+from .diagonalization import diagonalization as diag
+from .cnp import estimate_charge_neutrality_point
+from .bandtools import get_overlap_eivec
+from .potential import read_potential, print_potential, subband_potential, potential_file_overwrite_warning
+from .momentum import ZippedKB, get_momenta_from_locations
+
+from . import wf
+from . import xmlio
+from . import tableo
+from . import ploto
+from . import postprocess
+
+sysargv = cmdargs.sysargv
+
+#### MAIN PROGRAM ####
+def main():
+	SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+	scriptdir = os.path.dirname(os.path.realpath(__file__))
+	initialize_config()
+	initialize_materials()
+	numpy_printprecision = get_config_int('numpy_printprecision', minval = 0)
+	numpy_linewidth = get_config_int('numpy_linewidth', minval = 0)
+	np.set_printoptions(precision=numpy_printprecision, linewidth=numpy_linewidth)
+	ploto.initialize()
+
+	## Process command line arguments
+	params = cmdargs.params(kdim = 1)
+
+	ks = cmdargs.vectorvalues('k', onedim = True, twodim = False)
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
+	try:
+		kbs = ZippedKB(ks, bs)
+	except ValueError:
+		sys.stderr.write("ERROR (%s): Momentum k, magnetic field b, or both must be a constant.\n" % SCRIPT)
+		exit(1)
+	dependence = kbs.dependence()
+	job_monitor_limit = get_config_int('job_monitor_limit', minval = 0)
+	set_job_monitor(len(kbs) <= job_monitor_limit)
+	## No warning required for (in-plane) magnetic field, because it is treated
+	## fully in this mode.
+
+	opts = cmdargs.options()
+	plotopts = cmdargs.plot_options(format_args = (params, opts, kbs))
+	erange = cmdargs.erange()
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, kbs))
+
+	## Warning for combination of in-plane strain with non-trivial strip direction
+	## See function RSTUVepsilon() in physparams.py
+	nontriv_orient = (isinstance(params.lattice_trans, (int, float, np.integer)) and np.abs(params.lattice_trans) > 1e-6) or isinstance(params.lattice_trans, np.ndarray)
+	# TODO: For anisotropic in-plane strain and non-trivial strip direction, axial approximation is ignored
+
+	## Define observables
+	obsids = get_all_obsids(kdim=1, ll=False, norb=params.norbitals, opts=opts)
+	all_observables.initialize(param = params, dimful = 'dimful_obs' in opts and opts['dimful_obs'])
+
+	## Print warnings related to periodic boundary conditions in y direction
+	if 'periodicy' in opts and opts['periodicy']:
+		if params.magn != 0.0:
+			sys.stderr.write("Warning (Main): Periodic boundary conditions and nonzero magnetic field provide an unphysical model if not chosen commensurately.\n")
+		if params.yconfinement != 0.0:
+			sys.stderr.write("Warning (Main): Confinement in y direction is ignored when periodic boundary conditions in y direction are assumed.\n")
+			params.yconfinement = 0.0
+
+	## Do test on length scales and print warnings if applicable
+	print_length_scales(params)
+
+	pot = None
+	poty = None
+	if "selfcon" in sysargv:
+		sys.stderr.write("ERROR (%s): Cannot run self-consistent calculation inside 'kdotpy 1d'. Run 'kdotpy 2d' with selfcon separately and use the potential for `kdotpy 1d' manually.\n")
+		exit(1)
+	elif 'potentialfile' in opts:
+		pot = read_potential(params, opts['potentialfile'], directory = curdir)
+		if pot is not None:
+			print("Electrostatic potential:")
+			print_potential(params, pot)
+			ploto.potential(params, pot, filename = "potential%s.pdf" % outputid, ylabel = "V", yunit = "meV", text = "Potential energy (electron)")
+			potential_file_overwrite_warning("potential%s.csv" % outputid, opts.get('potentialfile'), directory = curdir)
+			tableo.potential("potential%s.csv" % outputid, params, pot, precision = 8, clabel = 'potential', units='meV')
+	elif 'ypotentialfile' in opts:
+		poty = read_potential(params, opts['ypotentialfile'], axis = 'y', directory = curdir)
+		if poty is not None:
+			print("Electrostatic potential (y direction)")
+
+	## Plots of parameters as function of z
+	if "plotfz" in sysargv or "plotqz" in sysargv:
+		postprocess.q_z(params, outputid, pot=pot, legend="legend" in sysargv)
+
+	modelopts_default = {'energy': 0.0, 'neig': 50, 'lattice_reg': False, 'split': 0.0, 'splittype': 'auto',
+	                     'ignorestrain': False, 'obs': obsids, 'periodicy': False, 'gauge_zero': 0.0, 'axial': True,
+	                     'obs_prop': all_observables, 'bia': False, 'ignore_magnxy': False, 'return_eivec': False}
+	mapping = {'targetenergy': 'energy'}
+	modelopts = cmdargs.initialize_opts(opts, modelopts_default, mapping)
+	num_cpus = opts.get('cpu', 1)
+	if modelopts['bia'] and modelopts['split'] != 0.0:
+		sys.stderr.write("Warning (%s): With BIA, the requested splitting will be applied only to certain momenta in order to lift degeneracies without causing unwanted asymmetries.\n" % SCRIPT)
+	if pot is not None and not isinstance(pot, dict):
+		modelopts['pot'] = pot
+	modelopts['lattice_reg'] = get_config_bool('lattice_regularization')
+	if modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (%s): It is recommended to disable lattice regularization using the configuration option 'lattice_regularization=false'.\n" % SCRIPT)
+
+	# Calculate bands at k = 0 (2D configuration)
+	modelopts_k0 = modelopts.copy()
+	modelopts_k0['return_eivec'] = True
+	modelopts_k0['erange'] = erange
+	# solver_k0 = dsolv.solverconfig(num_cpus, modelopts_k0)
+	# modelopts_k0['solver'] = solver_k0  # Append the solver to the model options to get used by diagonalizers
+	del modelopts_k0['erange']
+	for key in ['obs', 'periodicy', 'gauge_zero', 'ignore_magnxy']:
+		if key in modelopts_k0:
+			del modelopts_k0[key]
+	sys.stderr.write("Calculating bands (k=0)...\n")
+	params.magn = 0.0    # Set the magnetic field to zero here
+	diagdata_k0 = diag.hz_k0(params, **modelopts_k0)
+	sys.stderr.write("1 / 1\n")
+	e0 = estimate_charge_neutrality_point(params, data=diagdata_k0)
+
+	overlap_eivec = None
+	if 'overlaps' in sysargv:
+		overlap_subbands = ['E1+', 'E1-', 'E2+', 'H1+', 'H1-', 'H2+', 'H2-', 'H3+', 'H3-', 'L1+', 'L1-']
+		overlap_eivec = get_overlap_eivec(diagdata_k0, overlap_subbands, obs = plotopts.get('obs'))
+	if isinstance(poty, dict):
+		poty = subband_potential(params, poty, overlap_eivec)
+	params.magn = 0.0 if bs is None else bs[0]  # Set magnetic field to the desired value
+
+	if poty is not None:
+		modelopts['poty'] = poty
+
+	modelopts['erange'] = erange
+	solver = dsolv.solverconfig(num_cpus, modelopts, SCRIPT)
+	modelopts['solver'] = solver  # Append the solver to the model options to get used by diagonalizers
+	del modelopts['erange']
+
+	if len(kbs) == 1 and ("plotwf" in sysargv):
+		## Calculate bands at the given k value and determine band types
+		# sys.stderr.write("Calculating wave functions...\n")
+		wfstyle, wflocations = cmdargs.plotwf(onedim = True, twodim = False)
+		kwf = get_momenta_from_locations(kbs, wflocations)
+
+		if isinstance(kwf, (VectorGrid, list)) and len(kwf) > 0:
+			if len(kwf) > 1:
+				sys.stderr.write("Warning (%s): Wave functions are calculated only at a single k point.\n" % SCRIPT)
+				k1 = min(kwf) if isinstance(kwf, VectorGrid) else kwf[0]
+			else:
+				k1 = kwf[0]
+			b1 = kbs.b[0]
+			modelopts['return_eivec'] = True
+			data0 = diag.hzy((k1, b1), params, return_bandtypes = True, **modelopts)
+			data = DiagData(data0)
+			sys.stderr.write("1 / 1\n")
+
+			wf.onedim_ddp(data0, params, style = wfstyle, filename = "wfs%s" % outputid, erange = erange, overlap_eivec = overlap_eivec)
+			data0.delete_eivec()
+		else:
+			sys.stderr.write("Warning (%s): Nothing to be done, because grid values and wave function locations do not match.\n" % SCRIPT)
+			exit(0)
+	else:
+		## Calculate dispersion
+		if overlap_eivec is not None:
+			obsids.extend(sorted([bt for bt in overlap_eivec]))
+			modelopts['obs'] = obsids
+			modelopts['overlap_eivec'] = overlap_eivec
+		modelopts['params'] = params
+		data = DiagData([DiagDataPoint(kb[0], paramval=kb[1], grid_index=i) for i, kb in enumerate(kbs)], grid=kbs.get_grid())
+		data.diagonalize(ModelMomentum1D(modelopts), solver)
+		if "plotwf" in sysargv:
+			sys.stderr.write("Warning (%s): In 1D mode, the option 'plotwf' can only be used with a single momentum value.\n" % SCRIPT)
+		if "symmetrytest" in sysargv:
+			print()
+			print("Symmetry test:")
+			data.symmetry_test('x')
+		if 'symmetrize' in sysargv:
+			data = data.symmetrize('x')
+			if 'symmetrytest' in sysargv:
+				print()
+				print("Symmetries after symmetrization:")
+				data.symmetry_test('x')
+
+	## Dispersion / B dependence:
+	dependencestr = "bdependence" if dependence == 'b' else 'dispersion'
+	dependencedata = [data.get_paramval(), "b", "T"] if dependence == 'b' else None
+	## Write Table
+	tableo.disp("%s%s.csv" % (dependencestr, outputid), data, params, observables = obsids, dependence = dependencedata)
+
+	## Write XML
+	xmlio.writefile("output%s.xml" % outputid, data, params, observables = obsids, caller = SCRIPT, options = opts, dependence = dependencedata)
+
+	## Write plot
+	if len(data.shape) == 1:
+		ploto.bands_1d(data, filename = "%s%s.pdf" % (dependencestr, outputid), showplot = False, erange = erange, **plotopts)
+	else:
+		sys.stderr.write("Warning (%s): For 0-dimensional arrays, skip plot.\n" % SCRIPT)
+
+	## Density of states
+	if "dos" in sysargv and dependence == 'k':
+		idos, energies = postprocess.dos_k(params, data, erange, outputid, opts, plotopts, energies = {'e0': e0}, onedim = True)
+	else:
+		idos = None
+
+	## Warning for unparsed arguments
+	unparsed = sysargv.unparsed_warning(color = sys.stderr.isatty())
+	if unparsed is not None:
+		sys.stderr.write("Warning (%s): The following marked command line arguments were not used: %s\n" % (SCRIPT, unparsed))
+
+	exit(0)
+
+# Allow running this module directly as a script
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-2d.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-2d.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0cb75f2671ce50cb1102180dd936b76b81c2326
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-2d.py
@@ -0,0 +1,373 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+import os.path
+import numpy as np
+
+from .config import initialize_config, get_config_int, get_config_bool
+from .materials import initialize_materials
+from . import cmdargs
+from .momentum import VectorGrid, ZippedKB, get_momenta_from_locations
+from .models import ModelMomentum2D
+from .diagonalization import DiagData, DiagDataPoint
+from .observables import all_observables, get_all_obsids
+
+from .density import print_densityz
+from .potential import gate_potential_from_opts, print_potential, read_potential, potential_file_overwrite_warning
+from .parallel import set_job_monitor
+from .diagonalization import diagsolver as dsolv
+from .diagonalization import diagonalization as diag
+from .bandalign import bandindices, bandindices_adiabatic
+from .cnp import estimate_charge_neutrality_point
+from .bandtools import get_overlap_eivec, set_disp_derivatives
+from .extrema import band_local_extrema, band_minima_maxima, print_band_extrema, print_gap_information
+from . import symmetry
+from . import selfcon
+
+from . import wf
+from . import xmlio
+from . import tableo
+from . import ploto
+from . import postprocess
+
+sysargv = cmdargs.sysargv
+
+#### MAIN PROGRAM ####
+def main():
+	"""Run the kdotpy 2D calculation (kdotpy-2d.py entry point).
+
+	Parses the command line (device parameters, momentum k and magnetic
+	field b values, output options), diagonalizes the 2D Hamiltonian
+	(ModelMomentum2D) over the requested k or b grid, and writes the
+	results as csv tables, XML output, and pdf plots. Optional stages,
+	each triggered by a command-line argument: self-consistent Hartree
+	potential ('selfcon'), overlaps, wave functions ('plotwf'), symmetry
+	test/symmetrization, band extrema, (local) density of states, Berry
+	curvature, density as function of z, and the BHZ/Lowdin approximation.
+	Exits with status 0 on completion, 1 on invalid k/b input.
+	"""
+	SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+	scriptdir = os.path.dirname(os.path.realpath(__file__))
+	initialize_config()
+	initialize_materials()
+	numpy_printprecision = get_config_int('numpy_printprecision', minval = 0)
+	numpy_linewidth = get_config_int('numpy_linewidth', minval = 0)
+	np.set_printoptions(precision=numpy_printprecision, linewidth=numpy_linewidth)
+	ploto.initialize()
+
+	## Process command line arguments
+	params = cmdargs.params(kdim = 2)
+
+	ks = cmdargs.vectorvalues('k', onedim = True, twodim = True)
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
+	try:
+		# ZippedKB raises ValueError unless at least one of k, b is constant
+		kbs = ZippedKB(ks, bs)
+	except ValueError:
+		sys.stderr.write("ERROR (%s): Momentum k, magnetic field b, or both must be a constant.\n" % SCRIPT)
+		exit(1)
+	dependence = kbs.dependence()  # 'k' (dispersion) or 'b' (B dependence)
+	job_monitor_limit = get_config_int('job_monitor_limit', minval = 0)
+	set_job_monitor(len(kbs) <= job_monitor_limit)
+
+	opts = cmdargs.options()
+	plotopts = cmdargs.plot_options(format_args = (params, opts, kbs))
+	erange = cmdargs.erange()
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, kbs))
+	bandalign_opts = cmdargs.bandalign(directory = curdir)
+
+	# Warn about the limited treatment of the orbital effect of magnetic fields
+	if isinstance(bs, VectorGrid) and not bs.zero():
+		if 'ignore_magnxy' in opts and opts['ignore_magnxy']:
+			sys.stderr.write("Warning (%s): The 'orbital part' of the magnetic field is neglected because option 'ignore_magnxy' is enabled. Only Zeeman effect, paramagnetic exchange, etc. are considered.\n" % SCRIPT)
+		else:
+			if not bs.is_inplane():  # has out-of-plane components
+				sys.stderr.write("Warning (%s): The 'orbital part' of the out-of-plane magnetic field is neglected in this calculation mode. Zeeman effect, paramagnetic exchange, etc. are considered.\n" % SCRIPT)
+			if not bs.is_vertical():  # has in-plane components
+				sys.stderr.write("Warning (%s): The implementation for the 'orbital part' of the in-plane magnetic field is experimental.\n" % SCRIPT)
+
+	## Define observables
+	obsids = get_all_obsids(kdim=2, ll=False, norb=params.norbitals, opts=opts)
+	all_observables.initialize(param = params, dimful = 'dimful_obs' in opts and opts['dimful_obs'])
+
+	modelopts_default = {'energy': 0.0, 'neig': 50, 'lattice_reg': False, 'split': 0.0, 'splittype': 'auto',
+	                     'ignorestrain': False, 'obs': obsids, 'axial': True, 'obs_prop': all_observables,
+	                     'bia': False, 'ignore_magnxy': False, 'return_eivec': False}
+	mapping = {'targetenergy': 'energy'}
+	modelopts = cmdargs.initialize_opts(opts, modelopts_default, mapping)
+	num_cpus = opts.get('cpu', 1)
+	if modelopts['bia'] and modelopts['split'] != 0.0:
+		sys.stderr.write("Warning (%s): With BIA, the requested splitting will be applied only to certain momenta in order to lift degeneracies without causing unwanted asymmetries.\n" % SCRIPT)
+	# Lattice regularization comes from the configuration, not the command line
+	modelopts['lattice_reg'] = get_config_bool('lattice_regularization')
+	if modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (%s): It is recommended to disable lattice regularization using the configuration option 'lattice_regularization=false'.\n" % SCRIPT)
+
+	# Initialize solver
+	modelopts_solver = modelopts.copy()
+	modelopts_solver['erange'] = erange
+	solver = dsolv.solverconfig(num_cpus, modelopts_solver, SCRIPT)
+	modelopts['solver'] = solver
+
+	energies = {}  # For special energies like CNP, E_F, ...
+	pot = None  # electrostatic potential; filled in by file, selfcon, or gate options below
+
+	if 'potentialfile' in opts:
+		pot = read_potential(params, opts['potentialfile'], directory = curdir)
+
+	if "selfcon" in sysargv:
+		# Self-consistent Hartree: iteratively solve for the potential,
+		# then use the converged potential for the dispersion below.
+		if dependence != 'k':
+			sys.stderr.write("ERROR (%s): Self-consistent Hartree potential can be calculated only for a momentum dependence (dispersion).\n" % SCRIPT)
+			exit(1)
+		set_job_monitor(False)
+		print("Modern OOP self-consistent Hartree")
+		scopts_default = {'max_iterations': 10, 'min_iterations': 0, 'target_accuracy': 0.01, 'time_step': 0.9, 'num_cpus': 1}
+		mapping = {'selfcon_max_iterations': 'max_iterations', 'selfcon_accuracy': 'target_accuracy', 'selfcon_weight': 'time_step', 'cpu': 'num_cpus'}
+		scopts = cmdargs.initialize_opts(opts, scopts_default, mapping)
+		scopts['erange'] = erange
+		scopts['outputid'] = outputid
+
+		potopts_default = {'v_inner': None, 'v_outer': None, 'cardens': None, 'n_depletion': None, 'l_depletion': None, 'efield': None, 'n_offset': None, 'n_bg': None, 'custom_bc': None}
+		mapping = {'vgate': 'v_outer'}
+		potopts = cmdargs.initialize_opts(opts, potopts_default, mapping)
+
+		# Read from config what selfcon mode to use
+		if get_config_bool('selfcon_full_diag'):
+			print("Using the full-diagonalization approach for the self-consistent Hartree method.")
+			selfcon_solver = selfcon.SelfConSolverFullDiag
+		else:
+			print("Using the electron/hole picture based on the location of the CNP for the self-consistent Hartree method.")
+			selfcon_solver = selfcon.SelfConSolver
+
+		scs = selfcon_solver(
+			kbs, params, modelopts = modelopts, bandalign_opts = bandalign_opts,
+			opts = opts, **scopts)
+		scs.init_potential(potential = pot, **potopts)
+		scs.run()
+		pot = scs.get_potential()
+		energies.update(**scs.special_energies)
+		opts['cardens'] = scs.cardens
+		densz = scs.get_densityz_dict(qdens=True)
+		print_densityz(params, densz, cardens = scs.cardens)
+		ploto.densityz(params, densz, filename = "densz%s.pdf" % outputid, legend = True)
+		tableo.densityz(params, densz, filename = "densz%s.csv" % outputid)
+	elif ('vgate' in opts or 'vsurf' in opts or 'v_outer' in opts or 'v_inner' in opts) and 'potentialfile' not in opts:
+		# No selfcon: build a static gate potential directly from the options
+		pot = gate_potential_from_opts(params, opts)
+
+	if pot is not None:
+		print("Electrostatic potential:")
+		print_potential(params, pot)
+		ploto.q_z(params, pot, filename = "potential%s.pdf" % outputid, ylabel = "V", yunit = "meV", text = "Potential energy (electron)")
+		potential_file_overwrite_warning("potential%s.csv" % outputid, opts.get('potentialfile'), directory=curdir)
+		tableo.q_z("potential%s.csv" % outputid, params, pot, precision = 8, clabel = 'potential', units='meV')
+
+	## Plots of parameters as function of z
+	if "plotfz" in sysargv or "plotqz" in sysargv:
+		postprocess.q_z(params, outputid, pot=pot, legend="legend" in sysargv)
+
+	# Calculate bands at k = 0
+	modelopts_k0 = modelopts.copy()
+	modelopts_k0['return_eivec'] = True
+	del modelopts_k0['solver']
+	if pot is not None:
+		diagdata_k0 = bandindices_adiabatic(params, pot = pot, num_cpus = num_cpus, modelopts = modelopts_k0, bandalign_opts = bandalign_opts)
+		e0 = diagdata_k0.get_eival0()
+	else:
+		# modelopts_k0['erange'] = erange
+		# solver = dsolv.solverconfig(num_cpus, modelopts_k0)
+		# modelopts_k0['solver'] = solver  # Append the solver to the model options to get used by diagonalizers
+		# del modelopts_k0['erange']
+		sys.stderr.write("Calculating bands (k=0)...\n")
+		diagdata_k0 = diag.hz_k0(params, **modelopts_k0)
+		sys.stderr.write("1 / 1\n")
+		e0 = estimate_charge_neutrality_point(params, data=diagdata_k0)
+	modelopts['pot'] = pot
+	energies.update(e0 = e0)
+
+	overlap_eivec = None
+	if 'overlaps' in sysargv:
+		bandindices(DiagData([diagdata_k0]), input_data = diagdata_k0, params = params, **bandalign_opts)  # Store band indices for use in get_overlap_eivec
+		overlap_subbands = ['E1+', 'E1-', 'H1+', 'H1-', 'H2+', 'H2-', 'L1+', 'L1-']
+		overlap_eivec = get_overlap_eivec(diagdata_k0, overlap_subbands, obs = plotopts.get('obs'))
+		if overlap_eivec is not None:
+			# Overlaps become extra observables, one per subband character
+			obsids.extend(sorted([bt for bt in overlap_eivec]))
+			modelopts['obs'] = obsids
+
+	## Wave function options
+	if "plotwf" in sysargv:
+		wfstyle, wflocations = cmdargs.plotwf(onedim = False, twodim = True)
+		wflocations = get_momenta_from_locations(kbs, wflocations)
+		modelopts['wflocations'] = wflocations
+	else:
+		wfstyle = None
+		wflocations = None
+
+	## Calculate dispersion
+	modelopts_disp = modelopts.copy()
+	modelopts_disp['overlap_eivec'] = overlap_eivec
+	modelopts_disp['berry'] = erange if 'berry' in sysargv else False
+	modelopts_disp['erange'] = erange
+	if 'densityz' in sysargv:
+		modelopts_disp['return_eivec'] = True
+	solver = dsolv.solverconfig(num_cpus, modelopts_disp, SCRIPT)
+	modelopts_disp['solver'] = solver  # Append the solver to the model options to get used by diagonalizers
+	del modelopts_disp['erange']
+	modelopts_disp['params'] = params
+	data = DiagData([DiagDataPoint(kb[0], paramval=kb[1], grid_index=i) for i, kb in enumerate(kbs)], grid=kbs.get_grid())
+	data.diagonalize(ModelMomentum2D(modelopts_disp), solver)
+	data.set_char(diagdata_k0, eival_accuracy = solver.eival_accuracy)  # Store band characters
+	bandindices(data, input_data = diagdata_k0, params = params, **bandalign_opts)  # Store band indices
+
+	## Energy shift (TODO: Not very elegant)
+	if 'zeroenergy' in opts and opts['zeroenergy']:
+		e_ref = 0.0 if 'eshift' not in opts else opts['eshift']
+		eshift = data.set_zero_energy(e_ref)
+		if eshift is not None:
+			# Keep the special energies (CNP, E_F, ...) consistent with the shift
+			for e in energies:
+				energies[e] += eshift
+			sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: "
+			                 "Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, eshift))
+	elif 'eshift' in opts and opts['eshift'] != 0.0:
+		data.shift_energy(opts['eshift'])
+		sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: "
+		                 "Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, opts['eshift']))
+
+	## Symmetry test and symmetrization
+	if 'symmetrytest' in sysargv:
+		sys.stderr.write("Symmetry analysis...\n")
+		if 'split' in modelopts_disp and modelopts_disp['split'] != 0.0:
+			sys.stderr.write("Warning (%s): Nonzero splitting may reduce the symmetry group.\n" % SCRIPT)
+		symmetry.analyze(data)
+
+	if 'symmetrize' in sysargv:
+		data = data.symmetrize('xy')
+		if data.grid is not None:
+			# Symmetrization may extend the grid; keep kbs in sync with it
+			kbs = data.grid
+		if 'symmetrytest' in sysargv:
+			print()
+			print("Symmetries after symmetrization:")
+			data.symmetry_test('x')
+			data.symmetry_test('y')
+			data.symmetry_test('xy')
+
+	## Derivatives
+	if data.grid is not None:
+		set_disp_derivatives(data, dedk = True, v = True)
+
+	## Wave functions
+	if "plotwf" in sysargv:
+		wf.twodim(data, params, wfstyle = wfstyle, wflocations = wflocations,
+		          filename = "wfs%s" % outputid, erange = erange, remember_eivec = True,
+		          dependence = 'k', set_eivec_phase = True)
+
+	## Extrema
+	if "minmax" in sysargv and dependence != 'b':
+		band_minima_maxima(data)
+	if ("extrema" in sysargv or "localminmax" in sysargv or "minmaxlocal" in sysargv) and dependence != 'b':
+		local_extrema = band_local_extrema(data)
+		print_band_extrema(local_extrema)
+		print_gap_information(local_extrema, data)
+	else:
+		local_extrema = None
+
+	## Density of states
+	if "dos" in sysargv and dependence == 'k':
+		# Note: dos_k returns an updated special-energies dict, which rebinds
+		# the local 'energies' defined above (e0, selfcon energies, ...).
+		idos, energies = postprocess.dos_k(params, data, erange, outputid, opts, plotopts, energies = energies)
+		if 'byblock' in sysargv or 'byisopz' in sysargv:
+			postprocess.dos_byobs('k', params, data, 'isopz', erange, outputid, opts, plotopts, energies = energies)
+	else:
+		idos = None
+
+	## Berry curvature
+	if "berry" in sysargv and dependence == 'k':
+		postprocess.berry_k(params, data, erange, outputid, opts, plotopts, idos = idos)
+
+	## Local density of states
+	if "localdos" in sysargv and dependence == 'k' and len(data.shape) == 1:
+		postprocess.localdos_k(params, data, erange, outputid, opts, plotopts, energies = energies)
+
+	elif "localdos" in sysargv:
+		sys.stderr.write("Warning (%s): Local DOS available only for 1-dimensional momentum (k) dispersions.\n" % SCRIPT)
+
+	## Density of states by band
+	if "banddos" in sysargv or "dosbyband" in sysargv:
+		if dependence == 'k':
+			postprocess.banddos_k(params, data, erange, outputid, opts, plotopts, energies = energies)
+		else:
+			sys.stderr.write("Warning (%s): DOS by band available only for momentum (k) dispersions.\n" % SCRIPT)
+
+	## Density as function of z
+	if "densityz" in sysargv:
+		if "symmetrize" in sysargv:
+			sys.stderr.write("ERROR (%s): Option densityz is incompatible with symmetrization.\n" % SCRIPT)
+		else:
+			postprocess.densityz(params, data, erange, outputid, opts, plotopts)
+
+	## Dispersion / B dependence:
+	dependencestr = "bdependence" if dependence == 'b' else 'dispersion'
+	dependencedata = [data.get_paramval(), "b", "T"] if dependence == 'b' else None
+	## Write Table
+	tableo.disp("%s%s.csv" % (dependencestr, outputid), data, params, observables = obsids, dependence = dependencedata)
+	if len(data.shape) in [1, 2] and dependence == 'k':
+		plotobs = plotopts.get('obs')
+		tableo.disp_byband("%s%s.csv" % (dependencestr, outputid), data, params, erange = erange, observable = plotobs)
+	if local_extrema is not None:
+		tableo.extrema("extrema%s.csv" % outputid, local_extrema)
+
+	## Write XML
+	xmlio.writefile(
+		"output%s.xml" % outputid, data, params, observables = obsids,
+		caller = SCRIPT, options = opts, bands_extrema = local_extrema,
+		dependence = dependencedata
+	)
+
+	## Write plot
+	if len(data.shape) == 1:
+		ploto.bands_1d(data, filename = "%s%s.pdf" % (dependencestr, outputid), showplot = False,
+		               erange = erange, energies = energies, **plotopts)
+	elif len(data.shape) == 2:
+		ploto.bands_2d(data, filename = "%s2d%s.pdf" % (dependencestr, outputid), showplot = False,
+		               erange = erange, energies = energies, extrema = local_extrema, **plotopts)
+	else:
+		# NOTE(review): 1- and 2-dimensional grids are plotted above, so this
+		# branch is reached for 0- and >=3-dimensional data; the message text
+		# mentioning "2-dimensional" looks inaccurate — confirm and reword.
+		sys.stderr.write("Warning (%s): For 0- and 2-dimensional arrays, skip plot.\n" % SCRIPT)
+
+	## BHZ/Lowdin approximation
+	if "bhz" in sysargv and dependence == 'k':
+		postprocess.bhz(params, data, erange, outputid, opts, plotopts, modelopts = modelopts)
+
+	## Warning for unparsed arguments
+	unparsed = sysargv.unparsed_warning(color = sys.stderr.isatty())
+	if unparsed is not None:
+		sys.stderr.write("Warning (%s): The following marked command line arguments were not used: %s\n" % (SCRIPT, unparsed))
+
+	exit(0)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-batch.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..834d7f4112de77fc001c8903ac75807fd18e138a
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-batch.py
@@ -0,0 +1,85 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+from .parallel import parallel_apply_enumerate
+from .config import initialize_config
+from .batchtools import multi_values, replace_and_do_command, parse_batch_args, ncpu_nprocess
+
+
+def main():
+	initialize_config()
+	allvar, allval, cmd, batchopts = parse_batch_args(sys.argv)
+	ncpu, nprocess = ncpu_nprocess(cmd, **batchopts)
+
+	# Initialize batch run
+	print("Batch run")
+	print("Press Ctrl-C to interrupt")
+	exitstatuses = []
+
+	# Get values and iterate (run the commands)
+	val, strides = multi_values(*allval)
+	exitstatuses = parallel_apply_enumerate(
+		replace_and_do_command, val, f_args = (len(val), cmd, allvar, strides),
+		description = "Running scripts", num_processes = nprocess,
+		redefine_signals = False)
+
+	# Finalize; show job status
+	print("Done")
+	# print ("Exit statuses:", exitstatuses)
+	jobs_requested = len(val)
+	jobs_run = len(exitstatuses)
+	jobs_success = sum([1 if x == 0 else 0 for x in exitstatuses])
+	excode = 1
+	if jobs_success == jobs_requested:
+		print("All jobs completed successfully")
+		excode = 0
+	elif jobs_success == 0:
+		print("All jobs failed")
+	elif jobs_run < jobs_requested:
+		print("Some jobs did not start")
+	else:
+		print("Some jobs failed")
+	print("Requested: %i  Run: %i  Success: %i  Failed: %i" % (jobs_requested, jobs_run, jobs_success, jobs_run - jobs_success))
+	exit(excode)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk-ll.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk-ll.py
new file mode 100644
index 0000000000000000000000000000000000000000..4186b2d977b844ff719e0a277dc2710cd470015c
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk-ll.py
@@ -0,0 +1,211 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+import os.path
+import numpy as np
+
+from .config import initialize_config, get_config_int, get_config_bool
+from .materials import initialize_materials
+from . import cmdargs
+from .momentum import Vector, VectorGrid, ZippedKB
+from .observables import all_observables, get_all_obsids, plotobs_apply_llmode
+from .physparams import print_length_scales
+from .symbolic import SymbolicHamiltonian
+from .hamiltonian import hbulk_split
+from .bandalign import bandindices
+from .diagonalization import DiagData
+
+from .parallel import set_job_monitor
+from .diagonalization import lldiagonalization as lldiag
+
+from . import xmlio
+from . import tableo
+from . import ploto
+
+sysargv = cmdargs.sysargv
+
+#### MAIN PROGRAM ####
+def main():
+	"""Run the kdotpy bulk Landau-level calculation (kdotpy-bulk-ll.py entry point).
+
+	Parses the command line (bulk parameters, fixed momentum kz, magnetic
+	field b range), selects a Landau-level mode ('legacy', 'sym', or
+	'full'), diagonalizes the bulk LL Hamiltonian for all B values, and
+	writes csv tables, XML output, and a B-dependence plot. The dependence
+	must be on magnetic field b; otherwise the script exits with status 1.
+	"""
+	SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+	scriptdir = os.path.dirname(os.path.realpath(__file__))
+	initialize_config()
+	initialize_materials()
+	numpy_printprecision = get_config_int('numpy_printprecision', minval = 0)
+	numpy_linewidth = get_config_int('numpy_linewidth', minval = 0)
+	np.set_printoptions(precision=numpy_printprecision, linewidth=numpy_linewidth)
+	ploto.initialize()
+
+	## Process command line arguments
+	ll_mode = 'legacy' if 'lllegacy' in sysargv else 'full' if 'llfull' in sysargv else 'sym'  # possible options: 'legacy', 'sym', 'full'
+	params = cmdargs.params(kdim = 3)
+	ks = cmdargs.vectorvalues('k', onedim = True, twodim = True, threedim = True)
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
+	try:
+		kbs = ZippedKB(ks, bs)
+	except ValueError:
+		sys.stderr.write("ERROR (%s): Momentum k, magnetic field b, or both must be a constant.\n" % SCRIPT)
+		exit(1)
+	if kbs.dependence() != 'b':
+		sys.stderr.write("ERROR (%s): The dependence must be on magnetic field b.\n" % SCRIPT)
+		exit(1)
+		# Note: Because of this safeguard, we can keep using the VectorGrid instance bs throughout.
+
+	opts = cmdargs.options(axial_automatic = True)
+	plotopts = cmdargs.plot_options(format_args = (params, opts, kbs))
+	erange = cmdargs.erange()
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, kbs))
+	# NOTE(review): 'b' values were already parsed above (before ZippedKB);
+	# this second call appears redundant — confirm cmdargs.vectorvalues has
+	# no parse-state side effects before removing it.
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
+	job_monitor_limit = get_config_int('job_monitor_limit', minval = 0)
+	set_job_monitor(len(bs) <= job_monitor_limit)
+
+	## Modify parameters
+	params.l_barr = 0.0
+	params.nz = 1
+
+	## Define observables
+	obsids = get_all_obsids(kdim=3, ll=True, norb=params.norbitals, opts=opts)
+	all_observables.initialize(param = params, dimful = 'dimful_obs' in opts and opts['dimful_obs'])
+
+	## Do test on length scales and print warnings if applicable
+	print_length_scales(params)
+
+	# Check and manipulate momentum value: only a single kz is supported,
+	# and any in-plane components (kx, ky) are discarded with a warning.
+	if ks is None or len(ks) == 0:
+		sys.stderr.write("Warning: By default, kz = 0\n")
+		k0 = Vector(0.0, astype = 'z')
+	elif len(ks) > 1:
+		sys.stderr.write("Warning: Only at first k value; other values are ignored\n")
+		k0 = ks[0]
+	else:  # len(ks) == 1
+		k0 = ks[0]
+	kx, ky, kz = k0.xyz()
+	if abs(kx) > 1e-6 or abs(ky) > 1e-6:
+		sys.stderr.write("Warning: In LL mode, kx and ky values are ignored.\n")
+	k0 = Vector(kz, astype = 'z')
+
+	modelopts_default = {'lattice_reg': False, 'split': 0.0, 'splittype': 'auto', 'ignorestrain': False,
+	                     'obs': obsids, 'axial': True, 'bia': False, 'obs_prop': all_observables, 'return_eivec': False}
+	mapping = {'targetenergy': 'energy'}
+	modelopts = cmdargs.initialize_opts(opts, modelopts_default, mapping)
+	num_cpus = opts.get('cpu', 1)
+	ll_max = opts.get('ll_max', 30)
+
+	if isinstance(bs, VectorGrid) and not bs.is_vertical():
+		sys.stderr.write("Warning (%s): The 'orbital part' of the in-plane magnetic field (Bx, By) is neglected in this calculation mode, unlike Bz. The components Bx and By affect only Zeeman effect, paramagnetic exchange, etc.\n" % SCRIPT)
+	# TODO: Anisotropic in-plane strain also requires full mode
+	if modelopts['axial'] is False or modelopts['bia'] is True:
+		ll_mode = 'full'  # non-axial or BIA terms require the full LL mode
+	if modelopts['bia'] and modelopts['split'] != 0.0:
+		sys.stderr.write("Warning (%s): With BIA, the requested splitting will be applied only to certain momenta in order to lift degeneracies without causing unwanted asymmetries.\n" % SCRIPT)
+	modelopts['lattice_reg'] = get_config_bool('lattice_regularization')
+	if modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (%s): It is recommended to disable lattice regularization using the configuration option 'lattice_regularization=false'.\n" % SCRIPT)
+	if ll_mode == 'full':
+		obsids.extend(["llavg", "llbymax"])
+		sys.stderr.write("Warning (%s): For bulk LL, the implementation of Landau-level mode '%s' is experimental. Please double check your results.\n" % (SCRIPT, ll_mode))
+
+	# Process LL mode dependent observables (LL index, etc.)
+	plotobs_apply_llmode(plotopts, ll_mode)
+
+	# Calculate symbolic hamiltonian
+	if ll_mode in ['sym', 'full']:
+		modelopts_hsym = modelopts.copy()
+		# Drop options that SymbolicHamiltonian does not accept
+		for k in ['obs', 'obs_prop', 'energy', 'neig', 'cpu', 'pot', 'return_eivec']:
+			if k in modelopts_hsym:
+				del modelopts_hsym[k]
+		# NOTE(review): kz was already extracted from k0 above; this
+		# recomputation from ks looks redundant — confirm equivalence.
+		kz = 0.0 if ks is None or len(ks) == 0 else ks[0].z() if isinstance(ks[0], Vector) else 0.0
+		h_sym = SymbolicHamiltonian(hbulk_split, (params,), modelopts_hsym, hmagn = True, kz = kz)
+	else:
+		h_sym = None
+
+	# Do diagonalization at k = 0, b = 0 to get CNP (e0)
+	data0 = lldiag.hbulk_ll0(params, modelopts, description='Calculating bulk LL dispersion (B=0)')
+	bandindices(DiagData([data0]), params=params)  # This stores band indices in data0
+	e0 = data0.get_eival0()
+
+	# Do diagonalization for all B values
+	data = lldiag.hbulk_ll(ll_mode, kbs, ll_max, h_sym, params, modelopts, list_kwds = {}, description = 'Calculating bulk LL dispersion', num_processes = num_cpus)
+	bandindices(data, params=params, e0=e0)
+
+	## Energy shift (TODO: Not very elegant)
+	if 'zeroenergy' in opts and opts['zeroenergy']:
+		e_ref = 0.0 if 'eshift' not in opts else opts['eshift']
+		eshift = data.set_zero_energy(e_ref)
+		if eshift is not None:
+			sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, eshift))
+	elif 'eshift' in opts and opts['eshift'] != 0.0:
+		data.shift_energy(opts['eshift'])
+		sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, opts['eshift']))
+
+	table_erange = erange  # if "filtertable" in sysargv else None
+
+	# NOTE(review): this None guard comes after data.set_zero_energy /
+	# data.shift_energy above; if hbulk_ll can return None, those calls
+	# would raise first — consider moving this check up.
+	if data is None:
+		sys.stderr.write("ERROR (%s): No data.\n" % SCRIPT)
+		exit(2)
+
+	## Write Table
+	b = data.get_paramval()
+	tableo.disp("bdependence%s.csv" % outputid, data, params, erange = table_erange, observables = obsids, dependence = [b, "b", "T"])
+	plotobs = plotopts.get('obs')
+	tableo.disp_byband("bdependence%s.csv" % outputid, data, params, erange = erange, observable = plotobs)
+
+	## Write XML
+	xmlio.writefile("output%s.xml" % outputid, data, params, observables = obsids, caller = SCRIPT, options = opts, dependence = [b, "b", "T"], dependentoptions = [])
+
+	## Write plot
+	if len(bs) > 1:
+		ploto.bands_1d(data, filename = "bdependence%s.pdf" % outputid, showplot = False, erange = erange, paramstr = ploto.format_axis_label("$B$", "$\\mathrm{T}$"), **plotopts)
+	else:
+		sys.stderr.write("Warning (%s): For 0-dimensional arrays, skip plot.\n" % SCRIPT)
+
+	## Warning for unparsed arguments
+	unparsed = sysargv.unparsed_warning(color = sys.stderr.isatty())
+	if unparsed is not None:
+		sys.stderr.write("Warning (%s): The following marked command line arguments were not used: %s\n" % (SCRIPT, unparsed))
+
+	exit(0)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk.py
new file mode 100644
index 0000000000000000000000000000000000000000..a907518ff72be2b504bb4523148ea1318f227eb1
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-bulk.py
@@ -0,0 +1,217 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+import os.path
+import numpy as np
+
+from .config import initialize_config, get_config_int, get_config_bool
+from .materials import initialize_materials
+from . import cmdargs
+from .momentum import VectorGrid, ZippedKB
+from .diagonalization import DiagData
+from .observables import all_observables, get_all_obsids
+from .bandalign import bandindices
+
+from .parallel import set_job_monitor, parallel_apply
+from .diagonalization import diagonalization as diag
+from .extrema import band_local_extrema, band_minima_maxima, print_band_extrema, print_gap_information
+from .bandtools import set_orbitaltype, set_disp_derivatives
+from . import symmetry
+
+from . import xmlio
+from . import tableo
+from . import ploto
+from . import postprocess
+
+# Shorthand for the shared parsed-command-line state provided by cmdargs.
+sysargv = cmdargs.sysargv
+
+#### MAIN PROGRAM ####
+def main():
+	SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+	scriptdir = os.path.dirname(os.path.realpath(__file__))
+	initialize_config()
+	initialize_materials()
+	numpy_printprecision = get_config_int('numpy_printprecision', minval = 0)
+	numpy_linewidth = get_config_int('numpy_linewidth', minval = 0)
+	np.set_printoptions(precision=numpy_printprecision, linewidth=numpy_linewidth)
+	ploto.initialize()
+
+	## Process command line arguments
+	params = cmdargs.params(kdim = 3)
+
+	ks = cmdargs.vectorvalues('k', onedim = True, twodim = True, threedim = True)
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z')
+	try:
+		kbs = ZippedKB(ks, bs)
+	except ValueError:
+		sys.stderr.write("ERROR (%s): Momentum k, magnetic field b, or both must be a constant.\n" % SCRIPT)
+		exit(1)
+	dependence = kbs.dependence()
+	job_monitor_limit = get_config_int('job_monitor_limit', minval = 0)
+	set_job_monitor(len(kbs) <= job_monitor_limit)
+
+	if isinstance(bs, VectorGrid) and not bs.zero():
+		sys.stderr.write("Warning (%s): The 'orbital part' of the magnetic field is neglected in this calculation mode. Only Zeeman effect, paramagnetic exchange, etc. are considered.\n" % SCRIPT)
+
+	opts = cmdargs.options()
+	plotopts = cmdargs.plot_options(format_args = (params, opts, kbs))
+	erange = cmdargs.erange()
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, kbs))
+
+	## Modify parameters
+	params.l_barr = 0.0
+	params.nz = 1
+
+	## Define observables
+	obsids = get_all_obsids(kdim=3, ll=False, norb=params.norbitals, opts=opts)
+	all_observables.initialize(param = params, dimful = 'dimful_obs' in opts and opts['dimful_obs'])
+
+	modelopts_default = {'lattice_reg': False, 'split': 0.0, 'splittype': 'auto', 'ignorestrain': False,
+	                     'obs': obsids, 'axial': True, 'bia': False, 'obs_prop': all_observables, 'return_eivec': False}
+	modelopts = cmdargs.initialize_opts(opts, modelopts_default, {})
+	num_cpus = opts.get('cpu', 1)
+	modelopts['berry'] = erange if 'berry' in sysargv or 'berryx' in sysargv or 'berryy' in sysargv or 'berryz' in sysargv else False
+	if modelopts['bia'] and modelopts['split'] != 0.0:
+		sys.stderr.write("Warning (%s): With BIA, the requested splitting will be applied only to certain momenta in order to lift degeneracies without causing unwanted asymmetries.\n" % SCRIPT)
+	modelopts['lattice_reg'] = get_config_bool('lattice_regularization')
+	if modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (%s): It is recommended to disable lattice regularization using the configuration option 'lattice_regularization=false'.\n" % SCRIPT)
+
+	data = DiagData(parallel_apply(diag.hbulk, kbs, (params,), f_kwds = modelopts, num_processes = num_cpus, description = 'Calculating bulk dispersion'), grid = kbs.get_grid())
+	bandindices(data, params = params)
+	set_orbitaltype(data)
+
+	## Energy shift (TODO: Not very elegant)
+	if 'zeroenergy' in opts and opts['zeroenergy']:
+		e_ref = 0.0 if 'eshift' not in opts else opts['eshift']
+		eshift = data.set_zero_energy(e_ref)
+		if eshift is not None:
+			sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, eshift))
+	elif 'eshift' in opts and opts['eshift'] != 0.0:
+		data.shift_energy(opts['eshift'])
+		sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, opts['eshift']))
+
+	## Symmetry test and symmetrization
+	if 'symmetrytest' in sysargv:
+		sys.stderr.write("Symmetry analysis...\n")
+		if 'split' in modelopts and modelopts['split'] != 0.0:
+			sys.stderr.write("Warning (%s): Nonzero splitting may reduce the symmetry group.\n" % SCRIPT)
+		symmetry.analyze(data)
+
+	if 'symmetrize' in sysargv:
+		data = data.symmetrize('xyz')
+		if 'symmetrytest' in sysargv:
+			print()
+			print("Symmetries after symmetrization:")
+			data.symmetry_test('x')
+			data.symmetry_test('y')
+			data.symmetry_test('z')
+			data.symmetry_test('xy')
+			data.symmetry_test('xyz')
+
+	## Derivatives
+	if data.grid is not None:
+		set_disp_derivatives(data, dedk = True, v = True)
+
+	## Extrema
+	if "minmax" in sysargv:
+		band_minima_maxima(data)
+	if ("extrema" in sysargv or "localminmax" in sysargv or "minmaxlocal" in sysargv) and dependence != 'b':
+		local_extrema = band_local_extrema(data)
+		print_band_extrema(local_extrema)
+		print_gap_information(local_extrema, data)
+	else:
+		local_extrema = None
+
+	## Density of states
+	if "dos" in sysargv and dependence == 'k':
+		idos, energies = postprocess.dos_k(params, data, erange, outputid, opts, plotopts, energies = None)
+	else:
+		idos, energies = None, None
+
+	## Density of states by band
+	if "banddos" in sysargv or "dosbyband" in sysargv:
+		if dependence == 'k':
+			postprocess.banddos_k(params, data, erange, outputid, opts, plotopts, energies = energies)
+		else:
+			sys.stderr.write("Warning (%s): DOS by band available only for momentum (k) dispersions.\n" % SCRIPT)
+
+	## Dispersion / B dependence:
+	dependencestr = "bdependence" if dependence == 'b' else 'dispersion'
+	dependencedata = [data.get_paramval(), "b", "T"] if dependence == 'b' else None
+	## Write Table
+	sys.stderr.write("Writing data (csv) ...\n")
+	tableo.disp("%s%s.csv" % (dependencestr, outputid), data, params, observables = obsids, dependence = dependencedata)
+	if len(data.shape) in [1, 2, 3]:
+		plotobs = plotopts.get('obs')
+		tableo.disp_byband("%s%s.csv" % (dependencestr, outputid), data, params, erange = erange, observable = plotobs)
+	if local_extrema is not None:
+		tableo.extrema("extrema%s.csv" % outputid, local_extrema)
+
+	## Write XML
+	sys.stderr.write("Writing data (xml) ...\n")
+	xmlio.writefile(
+		"output%s.xml" % outputid, data, params, observables = obsids,
+		caller = SCRIPT, options = opts, bands_extrema = local_extrema,
+		dependence = dependencedata
+	)
+
+	## Write plot
+	if len(data.shape) == 1 and len(data) > 1:
+		ploto.bands_1d(data, filename = "%s%s.pdf" % (dependencestr, outputid), showplot = False, erange = erange, **plotopts)
+	elif len(data.shape) == 2:
+		ploto.bands_2d(data, filename = "%s2d%s.pdf" % (dependencestr, outputid), showplot = False, erange = erange, extrema = local_extrema, **plotopts)
+	else:
+		sys.stderr.write("Warning (%s): For 0-, 2-, or 3-dimensional arrays, skip plot.\n" % SCRIPT)
+
+	## Warning for unparsed arguments
+	unparsed = sysargv.unparsed_warning(color = sys.stderr.isatty())
+	if unparsed is not None:
+		sys.stderr.write("Warning (%s): The following marked command line arguments were not used: %s\n" % (SCRIPT, unparsed))
+
+	exit(0)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-compare.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-compare.py
new file mode 100644
index 0000000000000000000000000000000000000000..5582f9afec21376c0cf99f2ae171a58eb08bd50b
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-compare.py
@@ -0,0 +1,324 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+import os.path
+import re
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+
+from .config import initialize_config, cmdargs_config
+from .materials import initialize_materials
+from . import cmdargs
+from . import xmlio
+from . import ploto
+
+SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+
+def main():
+	initialize_config()
+	initialize_materials()
+	ploto.initialize()
+
+	erange = cmdargs.erange()  # plot range (energy)
+
+	filenames = []
+	filenameset = 0
+	filenamesets = []
+	defaultfilename = "output.xml"
+	accept_default_files = False
+	# If the argument '--' appears in the command line, consider only the arguments
+	# after the last '--' as input files
+	argstart = 1
+	for j in range(1, len(sys.argv)):
+		if sys.argv[j] == '--':
+			argstart = j
+	for a in sys.argv[argstart:]:
+		if os.path.isfile(a):
+			if a.endswith(".tar.gz") or a.endswith(".tar"):
+				tar_contents = xmlio.find_in_tar(a, "output*.xml")
+				if tar_contents is not None and tar_contents != []:
+					filenames.extend(tar_contents)
+					filenamesets.extend([filenameset for _ in tar_contents])
+			else:
+				filenames.append(a)
+				filenamesets.append(filenameset)
+		elif os.path.isdir(a):
+			ls = os.listdir(a)
+			filenames_thisdir = []
+			for fname in ls:
+				m = re.match(r"output.*\.xml(\.gz)?", fname)
+				if m is not None:
+					filenames_thisdir.append(os.path.join(a, fname))
+				m = re.match(r"data.*\.tar\.gz", fname)
+				if m is not None:
+					tar_contents = xmlio.find_in_tar(os.path.join(a, fname), "output*.xml")
+					if tar_contents is not None and tar_contents != []:
+						filenames_thisdir.extend(tar_contents)
+			if len(filenames_thisdir) > 0:
+				filenames.extend(filenames_thisdir)
+				filenamesets.extend([filenameset for _ in filenames_thisdir])
+			else:
+				sys.stderr.write("Warning (%s): No data files \"output*.xml\" found in directory %s\n" % (SCRIPT, a))
+		elif a == 'vs':
+			filenameset += 1
+
+	if "verbose" in sys.argv:
+		print("File sets:")
+		for fname, fnum in zip(filenames, filenamesets):
+			print("%i: %s" % (fnum + 1, fname))
+
+	if len(filenames) == 0:
+		filenameset = 0  # disable file name sets in case nothing is found
+		if not accept_default_files:
+			sys.stderr.write("ERROR (%s): No data files\n" % SCRIPT)
+			exit(2)
+		elif os.path.isfile(defaultfilename):
+			sys.stderr.write("Warning (%s): No data files specified. Using default \"%s\"\n" % (SCRIPT, defaultfilename))
+			filenames = [defaultfilename]
+		else:
+			ls = os.listdir(".")
+			for fname in ls:
+				m = re.match(r"output.*\.xml", fname)
+				if m is not None:
+					filenames.append(fname)
+			if len(filenames) == 0:
+				sys.stderr.write("ERROR (%s): No data files found.\n" % SCRIPT)
+				exit(2)
+			else:
+				sys.stderr.write("Warning (%s): No data files specified. Using default \"output.##.xml\"; %i files\n" % (SCRIPT, len(filenames)))
+
+	if len(filenames) < 2:
+		sys.stderr.write("Warning (%s): For comparison, please specify more than one file.\n" % SCRIPT)
+
+	if filenameset == 0:
+		filenamesets = list(range(0, len(filenamesets)))  # treat each file separately
+	filesets = []
+	if len(filenamesets) == 0:
+		sys.stderr.write("ERROR (%s): No data.\n" % SCRIPT)
+		exit(2)
+	for i in range(0, max(filenamesets) + 1):
+		fileset = []
+		for j in range(0, len(filenames)):
+			if filenamesets[j] == i:
+				fileset.append(filenames[j])
+		if len(fileset) > 0:
+			filesets.append(fileset)
+
+	print("Files:")
+	for fsj, fileset in enumerate(filesets):
+		print("Set %i:" % (fsj + 1))
+		for f in fileset:
+			print(("%s:%s" % f) if isinstance(f, tuple) else f)
+
+	## Get data of first plot
+	data, params, dependence = xmlio.readfiles(filesets[0])
+	if len(data) == 0:
+		sys.stderr.write("ERROR (%s): No data.\n" % SCRIPT)
+		exit(2)
+	if not (dependence == 'k' or dependence == 'b' or (isinstance(dependence, list) and dependence[1] == 'b')):
+		sys.stderr.write("ERROR (%s): Unexpected dependence type.\n" % SCRIPT)
+		exit(1)
+
+	# Reload configuration options from command line, because they may have been
+	# overwritten by readfiles.
+	cmdargs_config()
+
+	## Get plot options
+	opts = cmdargs.options(axial_automatic = 'ignore')
+	vgrid = {} if data.grid is None else data.grid  # VectorGrid for plot title formatting
+	plotopts = cmdargs.plot_options(format_args = (params, opts, vgrid))
+	curdir, outdir = cmdargs.outdir()
+	outputid = cmdargs.outputid(format_args = (params, opts, vgrid))  # output filename modifier
+
+	## Get observable
+	obsids = []
+	for d in data:
+		if d.obsids is not None:
+			obsids = d.obsids
+			break
+
+	obs = plotopts['obs']
+	if "obs" in sys.argv:
+		if not (obs in obsids or obs == "orbitalrgb" or obs.startswith("sigma") or obs.startswith("subband")):
+			plotopts['obs'] = None
+
+	distinguish_by_color = (obs is None)
+
+	if len(filesets) < 2:
+		markers = ['.']
+	elif len(filesets) == 2:
+		markers = ['+', 'x'] if obs is None else ['^', 'v']
+	else:
+		markers = ['p', 's', '^', 'v', '<', '>']
+	nm = len(markers)
+	colors = ['b', 'r', 'g', 'y', 'm', 'c']
+	nc = len(colors)
+
+	k_template = None
+	if "select" in sys.argv:
+		argn = sys.argv.index("select")
+		try:
+			sel_component = sys.argv[argn + 1]
+			sel_value = float(sys.argv[argn + 2])
+		except:
+			sys.stderr.write("ERROR (%s): Argument \"select\" must be followed by k, kx, ky, or kphi and a number.\n" % SCRIPT)
+			exit(1)
+		# sel_idx, sel_kval = k_select(data.get_momenta(), sel_component, sel_value)
+		sel_idx, sel_kval = data.get_momentum_grid().select(sel_component, sel_value)
+		data = [data[j] for j in sel_idx]
+		if len(data) == 0:
+			sys.stderr.write("Warning (%s): No data for this selection.\n" % SCRIPT)
+		else:
+			vtype = data[0].k.vtype
+			for d in data:
+				# d.k = k_represent_as(d.k, k_template)
+				d.k = d.k.astype(vtype)
+
+	if "sortdata" in sys.argv:
+		data.sort_by_grid()
+
+	## Energy shift (TODO: Not yet implemented)
+	if 'zeroenergy' in sys.argv or 'eshift' in sys.argv or 'energyshift' in sys.argv:
+		sys.stderr.write("Warning (%s): Energy shift requested, but not implemented for this script.\n" % SCRIPT)
+
+	if len(data) == 0:
+		sys.stderr.write("Warning (%s): Nothing to be plotted for file set 1.\n" % (SCRIPT,))
+	elif dependence == 'k':
+		fig = ploto.bands_1d(data, filename = "dispersion%s.pdf" % outputid, showplot = False, erange = erange, markers = (colors[0], markers[0]), **plotopts)
+	elif isinstance(dependence, list):
+		if dependence[1] == 'b':
+			paramtex = ploto.format_axis_label("$B$", "$\\mathrm{T}$")
+		elif dependence[2] != "":
+			paramtex = ploto.format_axis_label("$%s$" % dependence[1], "$\\mathrm{%s}$" % dependence[2])
+		else:
+			paramtex = "$%s$" % dependence[1]
+		fig = ploto.bands_1d(data, filename = "%sdependence%s.pdf" % (dependence[1], outputid), showplot = False, erange = erange, markers = (colors[0], markers[0]), paramstr = paramtex, **plotopts)
+	else:
+		raise ValueError("Illegal value for variable dependence")
+
+	# Prevent displaying multiple titles
+	if 'title' in plotopts:
+		plotopts['title'] = " "
+
+	### add further plots
+	for j in range(1, len(filesets)):
+		data, params, dependence = xmlio.readfiles(filesets[j], basedir = curdir)
+
+		# Reload configuration options from command line, because they may have been
+		# overwritten by readfiles. In a slightly hack-ish manner, we have to do this
+		# after every call to readfiles. TODO: This is not elegant. Also different
+		# configurations in the XML files may give unexpected results.
+		cmdargs_config()
+
+		if distinguish_by_color:
+			marker = (colors[j % nc], markers[j % nm])
+		else:
+			marker = markers[j % nm]
+
+		if "select" in sys.argv:
+			argn = sys.argv.index("select")
+			try:
+				sel_component = sys.argv[argn + 1]
+				sel_value = float(sys.argv[argn + 2])
+			except:
+				sys.stderr.write("ERROR (%s): Argument \"select\" must be followed by k, kx, ky, or kphi and a number.\n" % SCRIPT)
+				exit(1)
+			# sel_idx, sel_kval = k_select(data.get_momenta(), sel_component, sel_value)
+			sel_idx, sel_kval = data.get_momentum_grid().select(sel_component, sel_value)
+			data = [data[j] for j in sel_idx]
+			if len(data) == 0:
+				sys.stderr.write("Warning (%s): No data for this selection.\n" % SCRIPT)
+			else:
+				if k_template is None:
+					vtype = data[0].k.vtype
+				for d in data:
+					# d.k = k_represent_as(d.k, k_template)
+					d.k = d.k.astype(vtype)
+
+		if "sortdata" in sys.argv:
+			data.sort_by_grid()
+
+		if len(data) == 0:
+			sys.stderr.write("Warning (%s): Nothing to be plotted for file set %i.\n" % (SCRIPT, j + 1))
+		elif dependence == 'k':
+			fig = ploto.bands_1d(data, filename = "dispersion%s.pdf" % outputid, showplot = False, erange = erange, markers = marker, addtofig = fig, **plotopts)
+		elif isinstance(dependence, list):
+			if dependence[1] == 'b':
+				paramtex = ploto.format_axis_label("$B$", "$\\mathrm{T}$")
+			elif dependence[2] != "":
+				paramtex = ploto.format_axis_label("$%s$" % dependence[1], "$\\mathrm{%s}$" % dependence[2])
+			else:
+				paramtex = "$%s$" % dependence[1]
+			fig = ploto.bands_1d(data, filename = "%sdependence%s.pdf" % (dependence[1], outputid), showplot = False, erange = erange, markers = marker, paramstr = paramtex, addtofig = fig, **plotopts)
+		else:
+			raise ValueError("Illegal value for variable dependence")
+
+	if ('filelegend' in sys.argv or 'legend' in sys.argv) and len(data) > 0:
+		legendlabels = []
+		if 'legend' in sys.argv:
+			argn = sys.argv.index('legend')
+			for argi in range(argn + 1, len(sys.argv) - 1, 2):
+				if sys.argv[argi] == 'label':
+					legendlabels.append(sys.argv[argi + 1])
+				else:
+					break
+		if len(legendlabels) != 0 and len(legendlabels) != len(filesets):
+			sys.stderr.write("Warning (%s): Incorrect number of legend labels given.\n" % SCRIPT)
+		ax = fig.gca()
+		allplots = []
+		alllabels = []
+		for j in range(0, len(filesets)):
+			color = colors[j % nc] if distinguish_by_color else 'b'
+			thisplot, = plt.plot([], [], color + markers[j % nm])
+			allplots.append(thisplot)
+			fs = filesets[j]
+			alllabels.append(legendlabels[j] if j < len(legendlabels) else (fs[0][0] if isinstance(fs[0], tuple) else fs[0]) + ('' if len(fs) == 1 else ' (+%i)' % (len(fs) - 1)))
+		plt.legend(handles = allplots, labels = alllabels, loc = 'lower right', fontsize = 'small', markerscale = 0.6)
+		plt.savefig(("dispersion%s.pdf" % outputid) if dependence == 'k' else ("%sdependence%s.pdf" % (dependence[1], outputid)))
+	exit(0)
+
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-config.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bddeb2fe429c8056a380b9dadc3539c6231b9ae
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-config.py
@@ -0,0 +1,127 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+from . import config
+
+# Usage summary for the 'kdotpy config' subcommand; written to stdout by the
+# 'help' subcommand and on missing/invalid arguments.
+config_help_text = \
+"""Syntax:
+  kdotpy config file
+  kdotpy config edit
+  kdotpy config list    (alias: kdotpy config show)
+  kdotpy config all     (alias: kdotpy config full, kdotpy config fulllist)
+  kdotpy config help <item>
+  kdotpy config reset <item>
+  kdotpy config set <item>=<value>
+  kdotpy config <item>
+  kdotpy config <item>=<value>
+
+<item> can be any configuration item
+<value> is the value that <item> is set to
+You can combine multiple configuration items with semicolons and single quotes,
+for example:
+  kdotpy config 'fig_lmargin=10;fig_rmargin=5'
+"""
+
+
+#### MAIN PROGRAM ####
+def main():
+	sourcedir = os.path.dirname(os.path.realpath(__file__))
+	helpfile = os.path.join(sourcedir, 'docs', 'helpfile.txt')
+	config.initialize_config(warn_deprecated=False)
+
+	if len(sys.argv) <= 2:
+		sys.stderr.write("ERROR (kdotpy-config.py): Additional arguments required.\n")
+		sys.stdout.write(config_help_text)
+		exit(1)
+	arg1 = sys.argv[2].lower()
+
+	if arg1 == 'file':
+		for f in config.get_configfiles():
+			print(f)
+	elif arg1 == 'edit':
+		configfiles = config.get_configfiles()
+		if len(configfiles) == 0:
+			sys.stderr.write("ERROR (kdotpy-config.py): Configuration file does not exist.\n")
+			exit(1)
+		config.edit_configfile(configfiles[-1])
+	elif arg1 == 'reset':
+		config_keys = [key.split('=')[0] for arg in sys.argv[3:] for key in arg.split(";")]
+		config.check_config(config_keys)
+		for key in config_keys:
+			config.reset_config(key)  # invalid and deprecated keys implicitly ignored
+		config.write_config(deprecate=config_keys)
+	elif arg1 == 'set':
+		config_keys = [key.split('=')[0] for arg in sys.argv[3:] for key in arg.split(";")]
+		config.parse_config(config_keys)
+		config.write_config()
+	elif arg1 == 'help':
+		config_keys = [key.split('=')[0] for arg in sys.argv[3:] for key in arg.split(";")]
+		if len(config_keys) == 0:
+			sys.stdout.write(config_help_text)
+		else:
+			config.config_help(config_keys, helpfile=helpfile)
+	elif arg1 in ['list', 'show']:
+		all_config = config.get_all_config()
+		for key, val in all_config.items():
+			print("{}={}".format(key, val))
+	elif arg1 in ['all', 'full', 'fulllist']:
+		all_config = config.get_all_config(omit_default=False)
+		for key, val in all_config.items():
+			print("{}={}".format(key, val))
+	else:
+		config_keys = [key for arg in sys.argv[2:] for key in arg.split(";")]
+		set_keys = [key for key in config_keys if '=' in key]
+		get_keys = [key for key in config_keys if '=' not in key]
+
+		if len(set_keys) > 0:
+			config.parse_config(set_keys)
+			config.write_config()
+		config.check_config(get_keys)
+		for key in config_keys:  # get_keys and set_keys
+			key = key.split('=')[0]
+			val = config.get_config(key)
+			if val is not None:
+				print("{}={}".format(key, val))
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-ll.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-ll.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b588fe9b67e80c03ea3898db91b88b1a3a45900
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-ll.py
@@ -0,0 +1,406 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+import os.path
+import numpy as np
+
+from .config import initialize_config, get_config, get_config_num, get_config_int, get_config_bool
+from .materials import initialize_materials
+from .errorhandling import UnexpectedErrorHandler
+from . import cmdargs
+from .momentum import VectorGrid, ZippedKB, get_momenta_from_locations
+from .observables import all_observables, get_all_obsids, plotobs_apply_llmode
+from .symbolic import SymbolicHamiltonian
+from .hamiltonian import hz_sparse_split
+from .bandalign import bandindices
+from .cnp import estimate_charge_neutrality_point
+from .bandtools import get_overlap_eivec
+from . import selfcon
+
+from .potential import gate_potential_from_opts, print_potential, read_potential, potential_file_overwrite_warning
+from .parallel import set_job_monitor
+from .diagonalization import lldiagonalization as lldiag
+from .diagonalization import diagsolver as dsolv
+from .diagonalization import DiagData, DiagDataPoint
+from .models import ModelLL
+
+from . import wf
+from . import xmlio
+from . import tableo
+from . import ploto
+from . import postprocess
+
+sysargv = cmdargs.sysargv
+
+#### MAIN PROGRAM ####
+def main():
+	SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+	scriptdir = os.path.dirname(os.path.realpath(__file__))
+	initialize_config()
+	initialize_materials()
+	numpy_printprecision = get_config_int('numpy_printprecision', minval = 0)
+	numpy_linewidth = get_config_int('numpy_linewidth', minval = 0)
+	np.set_printoptions(precision=numpy_printprecision, linewidth=numpy_linewidth)
+	ploto.initialize()
+	magn_epsilon = get_config_num('magn_epsilon')
+
+	## Process command line arguments
+	ll_mode = 'legacy' if 'lllegacy' in sysargv else 'full' if 'llfull' in sysargv else 'sym'  # possible options: 'legacy', 'sym', 'full'
+	params = cmdargs.params(kdim = 2)
+	ks = cmdargs.vectorvalues('k', onedim = True, twodim = True)
+	bs = cmdargs.vectorvalues('b', onedim = True, twodim = True, threedim = True, defaultaxis = 'z', magn_epsilon = magn_epsilon)
+	try:
+		kbs = ZippedKB(ks, bs)
+	except ValueError:
+		sys.stderr.write("ERROR (%s): Momentum k, magnetic field b, or both must be a constant.\n" % SCRIPT)
+		exit(1)
+	if kbs.dependence() != 'b':
+		sys.stderr.write("ERROR (%s): The dependence must be on magnetic field b.\n" % SCRIPT)
+		exit(1)
+		# Note: Because of this safeguard, we can keep using the VectorGrid instance bs throughout.
+	job_monitor_limit = get_config_int('job_monitor_limit', minval = 0)
+	set_job_monitor(len(bs) <= job_monitor_limit)
+
+	opts = cmdargs.options(axial_automatic = True)
+	plotopts = cmdargs.plot_options(format_args = (params, opts, kbs))
+	erange = cmdargs.erange()
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, kbs))
+	bandalign_opts = cmdargs.bandalign(directory = curdir)
+
+	## Define observables
+	obsids = get_all_obsids(kdim=2, ll=True, norb=params.norbitals, opts=opts)
+	all_observables.initialize(param = params, dimful = 'dimful_obs' in opts and opts['dimful_obs'])
+
+	modelopts_default = {'energy': 0.0, 'neig': 50, 'lattice_reg': False, 'split': 0.0, 'splittype': 'auto',
+	                     'ignorestrain': False, 'obs': None, 'axial': True, 'obs_prop': all_observables, 'bia': False,
+	                     'return_eivec': False, 'custom_interface_length': None}
+	mapping = {'targetenergy': 'energy'}
+	modelopts = cmdargs.initialize_opts(opts, modelopts_default, mapping)
+	num_cpus = opts.get('cpu', 1)
+	ll_max = opts.get('ll_max', 30)
+	params.ny = ll_max + 3
+	# TODO: Anisotropic in-plane strain also requires full mode
+	if (not bs.is_vertical()) or modelopts['axial'] is False or modelopts['bia'] is True:
+		if ll_mode != 'full':
+			sys.stderr.write("Warning (%s): Automatically switch to 'full' LL mode.\n" % SCRIPT)
+		ll_mode = 'full'
+	if modelopts['bia'] and ll_mode != 'full':
+		sys.stderr.write("Warning (%s): BIA can only be treated in 'full' LL mode.\n" % SCRIPT)
+	if modelopts['bia'] and modelopts['split'] != 0.0:
+		sys.stderr.write("Warning (%s): With BIA, the requested splitting will be applied only to certain momenta in order to lift degeneracies without causing unwanted asymmetries.\n" % SCRIPT)
+	modelopts['lattice_reg'] = get_config_bool('lattice_regularization')
+	if modelopts['lattice_reg'] is True:
+		sys.stderr.write("Warning (%s): It is recommended to disable lattice regularization using the configuration option 'lattice_regularization=false'.\n" % SCRIPT)
+	if 'lloverlaps' in sysargv or 'llobs' in sysargv:
+		if ll_mode == 'full':
+			obsids.extend(['ll[%i]' % l for l in range(-2, ll_max + 1)])
+			opts['llobs'] = True
+		else:
+			sys.stderr.write("Warning (%s): Option 'llobs' (alias 'lloverlaps') can only be used in 'full' LL mode.\n" % SCRIPT)
+	if ll_mode == 'full':
+		obsids.extend(["llavg", "llbymax", "llmod2", "llmod4"])
+	if modelopts['custom_interface_length'] is not None:
+		obsids.extend(["custominterface[%i]" % modelopts['custom_interface_length'],
+		               "custominterfacechar[%i]" % modelopts['custom_interface_length']])
+
+	# Process LL mode dependent observables (LL index, etc.)
+	plotobs_apply_llmode(plotopts, ll_mode)
+
+	# Initialize solver
+	modelopts_solver = modelopts.copy()
+	modelopts_solver['erange'] = erange
+	modelopts_solver['ll_mode'] = ll_mode
+	modelopts_solver['ll_max'] = ll_max
+	solver = dsolv.solverconfig(num_cpus, modelopts_solver, SCRIPT)
+	modelopts['solver'] = solver
+
+	energies = {}
+	pot = None
+
+	# Calculate symbolic hamiltonian
+	if ll_mode in ['sym', 'full']:
+		modelopts_hsym = modelopts.copy()
+		for k in ['obs', 'obs_prop', 'energy', 'neig', 'cpu', 'pot', 'return_eivec', 'custom_interface_length']:
+			if k in modelopts_hsym:
+				del modelopts_hsym[k]
+		h_sym = SymbolicHamiltonian(hz_sparse_split, (params,), modelopts_hsym, hmagn = True)
+	else:
+		h_sym = None
+
+	if 'potentialfile' in opts:
+		pot = read_potential(params, opts['potentialfile'], directory = curdir, kbs = kbs)
+
+	if "selfcon" in sysargv:
+		print("Modern OOP self-consistent Hartree")
+		scopts_default = {'max_iterations': 10, 'min_iterations': 0, 'target_accuracy': 0.01, 'time_step': 0.9, 'num_cpus': 1}  # Apply potential
+		mapping = {'selfcon_max_iterations': 'max_iterations', 'selfcon_accuracy': 'target_accuracy', 'selfcon_weight': 'time_step', 'cpu': 'num_cpus'}
+		scopts = cmdargs.initialize_opts(opts, scopts_default, mapping)
+		scopts['erange'] = erange
+		scopts['outputid'] = outputid
+		scopts['ll_mode'] = ll_mode
+		scopts['ll_max'] = ll_max
+		scopts['h_sym'] = h_sym
+
+		potopts_default = {'v_inner': None, 'v_outer': None, 'cardens': None, 'n_depletion': None, 'l_depletion': None, 'efield': None, 'n_offset': None, 'n_bg': None, 'custom_bc': None}
+		mapping = {'vgate': 'v_outer'}
+		potopts = cmdargs.initialize_opts(opts, potopts_default, mapping)
+
+		if get_config_bool('selfcon_full_diag') and ll_mode != 'full':
+			sys.stderr.write("ERROR (%s): The self-consistent calculation with full diagonalization currently supports LL mode 'full' only. Either use the command argument 'llfull' to explicitly set LL mode to 'full', or disable full diagonalization by setting 'selfcon_full_diag=false' in the configuration.\n" % SCRIPT)
+			exit(1)
+
+		selfcon_solver = selfcon.SelfConSolverLLFullDiag if get_config_bool('selfcon_full_diag') else selfcon.SelfConSolverLL
+		scs = selfcon_solver(
+			kbs, params, modelopts=modelopts, bandalign_opts=bandalign_opts,
+			opts=opts, **scopts)
+		scs.init_potential(potential = pot, **potopts)
+		scs.run()
+		pot = scs.get_potential()
+		energies.update(**scs.special_energies)
+		opts['cardens'] = scs.cardens
+
+		bzval = bs.get_values('bz')
+		densz = scs.get_densityz_dict(qdens=True)
+		ploto.densityz(
+			params, densz, filename = "densz%s.pdf" % outputid, legend = True,
+			title = '$B_z = %.3f$ T', title_val = bzval)
+		tableo.densityz(
+			params, densz, filename = "densz%s.csv" % outputid, xval = bzval,
+			xlabel = "B_z", xunit = "T")
+
+	elif 'vgate' in opts or 'vsurf' in opts or 'v_outer' in opts or 'v_inner' in opts:
+		pot = gate_potential_from_opts(params, opts)
+
+	if pot is not None:
+		print("Electrostatic potential:")
+		print_potential(params, pot)
+		ploto.q_z(params, pot, filename = "potential%s.pdf" % outputid, ylabel = "V", yunit = "meV", text = "Potential energy (electron)")
+		potential_file_overwrite_warning("potential%s.csv" % outputid, opts.get('potentialfile'), directory = curdir)
+		if pot.ndim == 1:
+			tableo.q_z("potential%s.csv" % outputid, params, pot, precision = 8, clabel = 'potential', units='meV')
+		elif pot.ndim == 2:
+			bzval = bs.get_values('bz')
+			zval = params.zvalues_nm()
+			tableo.simple2d(
+				"potential%s.csv" % outputid, bzval, zval, pot,
+				float_precision=(8, 'g'), clabel='potential(B, z)',
+				axislabels=["B_z", "z"], axisunits=["T", "nm"],
+				datalabel='V', dataunit='meV'
+			)
+
+	## Plots of parameters as function of z
+	if "plotfz" in sysargv or "plotqz" in sysargv:
+		postprocess.q_z(params, outputid, pot=pot, legend="legend" in sysargv)
+
+	# Prepare parameter values (generic, k = 0)
+	modelopts['pot'] = pot
+	modelopts_k0 = modelopts.copy()
+	modelopts_k0['return_eivec'] = True
+	modelopts_k0['erange'] = erange
+	# solver = dsolv.solverconfig(num_cpus, modelopts_k0)
+	# modelopts_k0['solver'] = solver  # Append the solver to the model options to get used by diagonalizers
+	del modelopts_k0['erange']
+	if 'obs' in modelopts_k0:
+		del modelopts_k0['obs']
+	if isinstance(pot, np.ndarray) and pot.ndim == 2:
+		modelopts_k0['pot'] = pot[0]
+		if 'pot' in modelopts:
+			del modelopts['pot']
+		list_kwds = {'pot': pot}
+	else:
+		modelopts_k0['pot'] = pot
+		modelopts['pot'] = pot
+		list_kwds = {}
+
+	# Calculate bands at k = 0
+	diagdata_k0 = lldiag.hll_k0(ll_mode, ll_max, h_sym, params, modelopts_k0, description = "Calculating bands (k=0)...\n", return_eivec = True)
+	e0 = estimate_charge_neutrality_point(params, data=diagdata_k0)
+
+	overlap_eivec = None
+	if 'overlaps' in sysargv:
+		overlap_subbands = ['E1+', 'E1-', 'H1+', 'H1-', 'H2+', 'H2-', 'L1+', 'L1-']
+		overlap_eivec = get_overlap_eivec(diagdata_k0, overlap_subbands, obs = plotopts.get('obs'))
+		if overlap_eivec is not None:
+			obsids.extend(sorted([bt for bt in overlap_eivec]))
+			modelopts['obs'] = obsids
+
+	# Prepare parameter values (generic)
+	modelopts_bdep = modelopts.copy()
+	if ll_mode in ['sym', 'full']:
+		modelopts_bdep['orbital_magn'] = False
+		modelopts_bdep['berry'] = erange if ('berry' in sysargv or 'chern' in sysargv or 'hall' in sysargv) else False
+		if 'transitions' in opts and (opts['transitions'] is not False):
+			modelopts_bdep['transitions'] = opts['transitions']
+			modelopts_bdep['transitions_range'] = opts['transitionsrange']
+	modelopts_bdep['obs'] = obsids
+	modelopts_bdep['overlap_eivec'] = overlap_eivec
+	modelopts_bdep['erange'] = erange
+	modelopts_bdep['ll_mode'] = ll_mode
+	modelopts_bdep['ll_max'] = ll_max
+	if 'densityz' in sysargv:
+		modelopts_bdep['return_eivec'] = True
+	del modelopts_bdep['erange']
+
+	## Plot wave functions (parse arguments)
+	if "plotwf" in sysargv:  # works for 'sym' and 'full'
+		wfstyle, wflocations = cmdargs.plotwf()
+		wflocations = get_momenta_from_locations(kbs, wflocations)
+		modelopts_bdep['wflocations'] = wflocations
+	else:
+		wfstyle = None
+		wflocations = None
+	modelopts_bdep['params'] = params
+	if not bs.is_vertical():
+		# Note: if magnetic field has in-plane components, the magnetic field is not just an additive part in the full
+		# Hamiltonian, k eA mixing terms need to be treated correctly. Thus, we have to calculate a symbolic Hamiltonian
+		# for each magnetic field point. For the above helper diagonalization (k=0), the approach to calculate h_sym is
+		# valid, as it is only evaluated at B=(0,0,0). Be careful about naive optimization for constant in-plane fields,
+		# which in principle may use a single h_sym, but special care has to be taken to split the field components
+		# between the 'b0' and 'h_magn' part of the SymbolicHamiltonian!
+		h_sym = None
+		modelopts_bdep['h_sym_opts'] = modelopts_hsym
+	modelopts_bdep['h_sym'] = h_sym
+	data = DiagData([DiagDataPoint(0, paramval=b, grid_index=i) for i, b in enumerate(bs)], grid=bs)
+	data.diagonalize(ModelLL(modelopts_bdep), solver, list_kwds)
+
+	## Determine band indices (split by LL index); not for full mode
+	with UnexpectedErrorHandler("Warning (%s): Unexpected error during band alignment.\n" % SCRIPT):
+		if ll_mode in ['legacy', 'sym'] and diagdata_k0 is not None:
+			for lln in range(-2, ll_max + 1):
+				if 'verbose' in sys.argv:
+					print("LL %i:" % lln)
+				data_lln = data.select_llindex(lln)
+				data_lln.set_char(diagdata_k0, llindex=lln, eival_accuracy = solver.eival_accuracy)
+				# No change of precision needed, as we match from a true subset (same eivals):
+				data.set_char(data_lln, llindex=lln)
+			# For band indices, do the iteration over LLs internally.
+			b_idx = bandindices(data, input_data=diagdata_k0, params=params, **bandalign_opts)
+		elif ll_mode == 'full' and diagdata_k0 is not None:
+			# If e0 (CNP) has not been given as an argument in bandalign_opts,
+			# use the automatic value e0 defined above if it is defined. In that
+			# case, also reset g0. The value e0 is needed in full LL mode,
+			# because diagdata_k0 cannot be used due to LL degeneracy for B = 0,
+			# unlike legacy/symbolic LL mode.
+			if bandalign_opts.get('e0') is None and e0 is not None:
+				bandalign_opts['e0'] = e0
+				bandalign_opts['g0'] = None
+			b_idx = bandindices(data, params=params, auto_cnp=False, **bandalign_opts)
+			data.set_char(diagdata_k0, eival_accuracy = solver.eival_accuracy)
+
+	## Energy shift (TODO: Not very elegant)
+	if 'zeroenergy' in opts and opts['zeroenergy']:
+		e_ref = 0.0 if 'eshift' not in opts else opts['eshift']
+		eshift = data.set_zero_energy(e_ref)
+		if eshift is not None:
+			sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: "
+			                 "Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, eshift))
+	elif 'eshift' in opts and opts['eshift'] != 0.0:
+		data.shift_energy(opts['eshift'])
+		sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: "
+		                 "Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, opts['eshift']))
+
+	## Wave functions
+	if "plotwf" in sysargv:
+		wf.twodim(data, params, wfstyle = wfstyle, wflocations = wflocations,
+		          filename = "wfs%s" % outputid, erange = erange, remember_eivec = True,
+		          dependence = 'b', ll_full = (ll_mode == 'full'))
+
+	## Write Table
+	table_erange = erange
+	b = data.get_paramval()
+	tableo.disp("bdependence%s.csv" % outputid, data, params, erange = table_erange,
+	            observables = obsids, dependence = [b, "b", "T"])
+	plotobs = plotopts.get('obs')
+	tableo.disp_byband("bdependence%s.csv" % outputid, data, params, erange = erange,
+	                   observable = plotobs, dependence = [b, "b", "T"])
+
+	## Write XML
+	xmlio.writefile("output%s.xml" % outputid, data, params, observables = obsids,
+	                caller = SCRIPT, options = opts, dependence = [b, "b", "T"],
+	                dependentoptions = [])
+
+	## Write plot
+	if len(bs) > 1:
+		fig_bdep = ploto.bands_1d(data, filename = "bdependence%s.pdf" % outputid, showplot = False, erange = erange, **plotopts)
+	else:
+		sys.stderr.write("Warning (%s): For 0-dimensional arrays, skip plot.\n" % SCRIPT)
+		fig_bdep = None
+
+	## Density of states (data and plots)
+	if 'dos' in sysargv or 'hall' in sysargv:
+		ee_at_idos = postprocess.dos_ll(params, data, erange, outputid, opts, plotopts, fig_bdep = fig_bdep)
+		if 'byblock' in sysargv or 'byisopz' in sysargv:
+			postprocess.dos_byobs('ll', params, data, 'isopz', erange, outputid, opts, plotopts, fig_bdep = fig_bdep)
+		if 'densityz' in sysargv:
+			postprocess.densityz_ll(params, data, erange, outputid, opts, plotopts, ll_full = (ll_mode == 'full'))
+	else:
+		ee_at_idos = None
+
+	## Berry curvature (data and plots)
+	if 'berry' in sysargv or 'chern' in sysargv or 'hall' in sysargv:
+		if ll_mode in ['sym', 'full']:
+			postprocess.berry_ll(params, data, erange, outputid, opts, plotopts)
+		else:
+			sys.stderr.write("ERROR (%s): Option 'berry', 'chern', or 'hall' not implemented for %s mode\n" % (SCRIPT, ll_mode))
+
+	## Transitions (data and plots)
+	if 'transitions' in opts and (opts['transitions'] is not False) and ll_mode in ['sym', 'full']:
+		postprocess.transitions(params, data, erange, outputid, opts, plotopts, ee_at_idos = ee_at_idos, fig_bdep = fig_bdep)
+	elif 'transitions' in opts and (opts['transitions'] is not False):
+		sys.stderr.write("ERROR (%s): Option 'transitions' not implemented for %s mode\n" % (SCRIPT, ll_mode))
+
+	# Local DOS (data and plots)
+	if "localdos" in sysargv or 'hall' in sysargv:
+		postprocess.localdos_ll(params, data, erange, outputid, opts, plotopts)
+
+	## Warning for unparsed arguments
+	unparsed = sysargv.unparsed_warning(color = sys.stderr.isatty())
+	if unparsed is not None:
+		sys.stderr.write("Warning (%s): The following marked command line arguments were not used: %s\n" % (SCRIPT, unparsed))
+
+	exit(0)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-merge.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-merge.py
new file mode 100644
index 0000000000000000000000000000000000000000..0237805682b4680777b6cf6e8aab8fefa79e3382
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-merge.py
@@ -0,0 +1,279 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+import os.path
+import re
+
+from .config import initialize_config, cmdargs_config
+from .materials import initialize_materials
+from . import cmdargs
+from .bandalign import bandindices
+from . import postprocess
+
+from . import ploto
+from . import tableo
+from . import xmlio
+
+SCRIPT = os.path.basename(__file__)  # the filename of this script, without directory
+
+def main():
+	initialize_config()
+	initialize_materials()
+	ploto.initialize()
+
+	erange = cmdargs.erange()  # plot range (energy)
+
+	filenames = []
+	defaultfilename = "output.xml"
+	accept_default_files = False
+	# If the argument '--' appears in the command line, consider only the arguments
+	# after the last '--' as input files
+	argstart = 1
+	for j in range(1, len(sys.argv)):
+		if sys.argv[j] == '--':
+			argstart = j
+	for a in sys.argv[argstart:]:
+		if os.path.isfile(a):
+			if a.endswith(".tar.gz") or a.endswith(".tar"):
+				tar_contents = xmlio.find_in_tar(a, "output*.xml")
+				if tar_contents is not None and tar_contents != []:
+					filenames.extend(tar_contents)
+			else:
+				filenames.append(a)
+		elif os.path.isdir(a):
+			ls = os.listdir(a)
+			filenames_thisdir = []
+			for fname in ls:
+				m = re.match(r"output.*\.xml(\.gz)?", fname)
+				if m is not None:
+					filenames_thisdir.append(os.path.join(a, fname))
+				m = re.match(r"data.*\.tar\.gz", fname)
+				if m is not None:
+					tar_contents = xmlio.find_in_tar(os.path.join(a, fname), "output*.xml")
+					if tar_contents is not None and tar_contents != []:
+						filenames_thisdir.extend(tar_contents)
+			if len(filenames_thisdir) > 0:
+				filenames.extend(filenames_thisdir)
+			else:
+				sys.stderr.write("Warning (%s): No data files \"output*.xml\" found in directory %s\n" % (SCRIPT, a))
+
+	if len(filenames) == 0:
+		if not accept_default_files:
+			sys.stderr.write("ERROR (%s): No data files\n" % SCRIPT)
+			exit(2)
+		elif os.path.isfile(defaultfilename):
+			sys.stderr.write("Warning (%s): No data files specified. Using default \"%s\"\n" % (SCRIPT, defaultfilename))
+			filenames = [defaultfilename]
+		else:
+			ls = os.listdir(".")
+			for fname in ls:
+				m = re.match(r"output.*\.xml", fname)
+				if m is not None:
+					filenames.append(fname)
+			if len(filenames) == 0:
+				sys.stderr.write("ERROR (%s): No data files found.\n" % SCRIPT)
+				exit(2)
+			else:
+				sys.stderr.write("Warning (%s): No data files specified. Using default \"output.##.xml\"; %i files\n" % (SCRIPT, len(filenames)))
+
+	print("Files:")
+	for f in filenames:
+		print(("%s:%s" % f) if isinstance(f, tuple) else f)
+
+	data, params, dependence = xmlio.readfiles(filenames)
+	if len(data) == 0:
+		sys.stderr.write("ERROR (%s): No data.\n" % SCRIPT)
+		exit(2)
+	if not (dependence == 'k' or dependence == 'b' or (isinstance(dependence, list) and dependence[1] == 'b')):
+		sys.stderr.write("ERROR (%s): Unexpected dependence type.\n" % SCRIPT)
+		exit(1)
+
+	# Reload configuration options from command line, because they may have been
+	# overwritten by readfiles.
+	cmdargs_config()
+
+	opts = cmdargs.options(axial_automatic = 'ignore')
+	vgrid = {} if data.grid is None else data.grid  # VectorGrid for plot title formatting
+	plotopts = cmdargs.plot_options(format_args = (params, opts, vgrid))
+	curdir, outdir = cmdargs.outdir()  # changes dir as well
+	outputid = cmdargs.outputid(format_args = (params, opts, vgrid))  # output filename modifier
+
+	if "select" in sys.argv:
+		argn = sys.argv.index("select")
+		try:
+			sel_component = sys.argv[argn + 1]
+			sel_value = float(sys.argv[argn + 2])
+		except:
+			sys.stderr.write("ERROR (%s): Argument \"select\" must be followed by k, kx, ky, or kphi and a number.\n" % SCRIPT)
+			exit(1)
+		# sel_idx, sel_kval = k_select(data.get_momenta(), sel_component, sel_value)
+		sel_idx, sel_kval = data.get_momentum_grid().select(sel_component, sel_value)
+		data = [data[j] for j in sel_idx]
+
+		if len(data) == 0:
+			sys.stderr.write("Warning (%s): No data for this selection.\n" % SCRIPT)
+
+	if len(data) == 0:
+		sys.stderr.write("ERROR (%s): Nothing to be plotted.\n" % SCRIPT)
+		exit(2)
+
+	if "sortdata" in sys.argv:
+		data.sort_by_grid()
+
+	if "verbose" in sys.argv:
+		if dependence == 'k':
+			print("Momentum values:")
+			print(", ". join([str(d.k) for d in data]))
+		elif dependence == 'b' or isinstance(dependence, list):
+			print("Parameter (B) values:")
+			print(", ". join([str(d.paramval) for d in data]))
+		else:
+			raise ValueError("Illegal value for variable dependence")
+
+	e0 = None
+	bandalign_opts = cmdargs.bandalign(directory = curdir)
+	if bandalign_opts:  # false if None or {}
+		if bandalign_opts.get('e0') is None and bandalign_opts.get('from_file') is None:
+			sys.stderr.write("Warning (%s): Re-aligning (reconnecting) the states with automatically determined 'anchor energy'. If the result is not satisfactory or if the precise band indices are important, you should define the anchor energy explicitly by 'bandalign -4' (value is energy in meV).\n" % SCRIPT)
+		if data.grid is None:
+			sys.stderr.write("Warning (%s): Re-aligning (reconnecting) the states may fail if the data is unsorted. Due to absence of a VectorGrid instance in the data, it cannot be determined whether sorting is necessary.\n" % SCRIPT)
+		elif not data.grid.is_sorted():
+			sys.stderr.write("Warning (%s): For re-aligning (reconnecting) the states, automatically attempt to sort the data.\n" % SCRIPT)
+			data.sort_by_grid()
+		bandindices(data, **bandalign_opts)
+
+	## Energy shift (TODO: Not very elegant)
+	if 'zeroenergy' in opts and opts['zeroenergy']:
+		e_ref = 0.0 if 'eshift' not in opts else opts['eshift']
+		eshift = data.set_zero_energy(e_ref)
+		if eshift is not None:
+			sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, eshift))
+	elif 'eshift' in opts and opts['eshift'] != 0.0:
+		data.shift_energy(opts['eshift'])
+		sys.stderr.write("Warning (%s): Energy shifted by %.3f meV. Experimental function: Other input and output energies may still refer to the unshifted energy.\n" % (SCRIPT, opts['eshift']))
+
+	if dependence == 'k':
+		if 'symmetrytest' in sys.argv:
+			data.symmetry_test('x', ignore_lower_dim = True)
+			data.symmetry_test('y', ignore_lower_dim = True)
+			data.symmetry_test('z', ignore_lower_dim = True)
+			data.symmetry_test('xy', ignore_lower_dim = True)
+			data.symmetry_test('xyz', ignore_lower_dim = True)
+		if 'symmetrize' in sys.argv:
+			data = data.symmetrize('xyz')
+			if 'symmetrytest' in sys.argv:
+				print()
+				print("Symmetries after symmetrization:")
+				data.symmetry_test('x', ignore_lower_dim = True)
+				data.symmetry_test('y', ignore_lower_dim = True)
+				data.symmetry_test('z', ignore_lower_dim = True)
+				data.symmetry_test('xy', ignore_lower_dim = True)
+				data.symmetry_test('xyz', ignore_lower_dim = True)
+
+		if len(data.shape) == 1 and len(data) > 1:
+			ploto.bands_1d(data, filename = "dispersion%s.pdf" % outputid, showplot = False, erange = erange, **plotopts)
+		elif len(data.shape) == 2 and len(data) > 1:
+			ploto.bands_2d(data, filename = "dispersion2d%s.pdf" % outputid, showplot = False, erange = erange, **plotopts)
+		elif len(data.shape) > 2 or len(data) == 1:
+			sys.stderr.write("Warning (%s): For 0- and 3-dimensional arrays, skip plot.\n" % SCRIPT)
+		else:
+			sys.stderr.write("ERROR (%s): Array of invalid dimension or size.\n" % SCRIPT)
+			exit(1)
+	elif isinstance(dependence, list):
+		if dependence[1] == 'b':
+			paramtex = ploto.format_axis_label("$B$", "$\\mathrm{T}$")
+		elif dependence[2] != "":
+			paramtex = ploto.format_axis_label("$%s$" % dependence[1], "$\\mathrm{%s}$" % dependence[2])
+		else:
+			paramtex = "$%s$" % dependence[1]
+		if len(data.shape) == 1 and len(data) > 1:
+			ploto.bands_1d(data, filename = "%sdependence%s.pdf" % (dependence[1], outputid), showplot = False, erange = erange, paramstr = paramtex, **plotopts)
+		elif len(data.shape) >= 2 or len(data) == 1:
+			sys.stderr.write("Warning (%s): For 0-, 2-, and 3-dimensional arrays, skip plot.\n" % SCRIPT)
+		else:
+			sys.stderr.write("ERROR (%s): Array of invalid dimension or size.\n" % SCRIPT)
+			exit(1)
+	else:
+		raise ValueError("Illegal value for variable dependence")
+
+	## Write Table
+	if "writecsv" in sys.argv and len(data) > 0:
+		dependencestr = "bdependence" if dependence == 'b' else 'dispersion'
+		dependencedata = [data.get_paramval(), "b", "T"] if dependence == 'b' else None
+		obsids = data[0].obsids
+		tableo.disp("%s%s.csv" % (dependencestr, outputid), data, params, observables = obsids, dependence = dependencedata)
+		plotobs = plotopts.get('obs')
+		if len(data.shape) in [1, 2] and dependence == 'k':
+			tableo.disp_byband("%s%s.csv" % (dependencestr, outputid), data, params, erange = erange, observable = plotobs)
+		elif len(data.shape) == 1 and dependence == 'b' or (isinstance(dependence, list) and dependence[1] == 'b'):
+			b = data.get_paramval()
+			tableo.disp_byband("bdependence%s.csv" % outputid, data, params, erange = erange, observable = plotobs, dependence = [b, "b", "T"])
+		else:
+			sys.stderr.write("Warning (%s): Data shape and/or dependence not suitable for csv output.\n" % SCRIPT)
+
+	try:
+		dep = data.gridvar
+	except:
+		dep = None
+
+	## Density of states
+	if "dos" in sys.argv and dep == 'k' and params.kdim in [1, 2]:
+		idos, energies = postprocess.dos_k(params, data, erange, outputid, opts, plotopts, energies = {'e0': e0}, onedim = (params.kdim == 1))
+	elif "dos" in sys.argv and dep == 'b':
+		postprocess.dos_ll(params, data, erange, outputid, opts, plotopts)
+	elif "dos" in sys.argv:
+		sys.stderr.write("Warning (%s): DOS calculation requires k dependence and momentum dimension = 1 or 2, or b dependence\n" % SCRIPT)
+	else:
+		idos = None
+
+	# Local DOS (data and plots)
+	if "localdos" in sys.argv and dep == 'b':
+		postprocess.localdos_ll(params, data, erange, outputid, opts, plotopts)
+	elif "localdos" in sys.argv:
+		sys.stderr.write("Warning (%s): Local DOS calculation requires b dependence.\n" % SCRIPT)
+		idos = None
+
+	exit(0)
+
+if __name__ == '__main__':
+	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/kdotpy-test.py b/kdotpy-v1.0.0/src/kdotpy/kdotpy-test.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfb1f23d51ccc898aa045497469f4501990ec2df
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/kdotpy-test.py
@@ -0,0 +1,410 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os.path
+from platform import system
+import subprocess
+from time import time as rtime
+import re
+import shlex
+
testdir = 'test'  # output directory appended as 'outdir test' to each test command
_kdotpy_cmd = 'kdotpy'  # executable name used to invoke kdotpy
_verbose = False  # if True, append 'verbose' to every test command (set from CLI in main())
_showcmd = False  # if True, print command lines instead of executing them (set from CLI in main())
+
def run_test(cmd, args, append_outdir=True):
	"""Execute one kdotpy subcommand as a subprocess and return the result.

	Arguments:
	cmd             String. The kdotpy subcommand, e.g. '2d' or 'll'.
	args            List of strings. Arguments passed to the subcommand.
	append_outdir   True or False. If True, append 'outdir' and the test
	                directory to the argument list.

	Returns:
	A subprocess.CompletedProcess instance.
	"""
	full_args = list(args)
	if append_outdir:
		full_args += ['outdir', testdir]
	if _verbose:
		full_args += ['verbose']
	full_args = [_kdotpy_cmd, cmd] + full_args
	try:
		return subprocess.run(full_args)
	except OSError:
		# Fallback: retry through the shell; on non-Windows systems the
		# argument list must then be joined into a single quoted string.
		if system() != 'Windows':
			full_args = " ".join([shlex.quote(arg) for arg in full_args])
		return subprocess.run(full_args, shell=True)
+
def cmd_to_string(cmd, args, append_outdir=True):
	"""Build the shell-quoted command line for one kdotpy subcommand.

	Arguments:
	cmd             String. The kdotpy subcommand, e.g. '2d' or 'll'.
	args            List of strings. Arguments passed to the subcommand.
	append_outdir   True or False. If True, append 'outdir' and the test
	                directory to the argument list.

	Returns:
	String. The full command line, with each argument shell-quoted.
	"""
	full_args = list(args)
	if append_outdir:
		full_args += ['outdir', testdir]
	if _verbose:
		full_args += ['verbose']
	quoted = " ".join(shlex.quote(arg) for arg in full_args)
	return _kdotpy_cmd + " " + cmd + " " + quoted
+
+## Class definitions compatible with pytest
class TestRuns:
	"""Collection of kdotpy test runs, compatible with pytest.

	Each test_* method assembles the command line for one kdotpy run. With
	get_cmd = False (the default, as used by pytest) the command is executed
	and a zero exit code is asserted; with get_cmd = True the method instead
	returns the full command line as a shell-quoted string.
	"""

	def _run(self, cmd, args, get_cmd, append_outdir = True):
		"""Shared driver for all test methods (removes per-test boilerplate).

		Arguments:
		cmd             String. The kdotpy subcommand, e.g. '2d' or 'll'.
		args            List of strings. Arguments for the subcommand.
		get_cmd         True or False. If True, return the command line as a
		                string without executing; if False, run the command
		                and assert that its exit code is zero.
		append_outdir   True or False. Passed through to run_test() and
		                cmd_to_string().

		Returns:
		String (if get_cmd is True) or None.
		"""
		if get_cmd:
			return cmd_to_string(cmd, args, append_outdir = append_outdir)
		cp = run_test(cmd, args, append_outdir = append_outdir)
		assert cp.returncode == 0

	def test_2d_qw(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs subbande1e2h1h2l1 overlaps erange -100 20 k 0 0.5 / 25 kphi 45 zres 0.25 targetenergy -35 " \
		       "neig 20 split 0.01 llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_qw dos localdos dostemp 2 char legend extrema symmetrize bhz " \
		       "config fig_colorbar_method=file;dos_quantity=e;dos_unit=cm".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_qw_bia(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs subbande1e2h1h2l1 overlaps erange -100 20 k -0.5 0.5 / 50 kphi 45 zres 0.25 targetenergy -35 " \
		       "neig 20 split 0.01 llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_qw_bia char legend bia config fig_colorbar_method=file".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_qw_2(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs subbande1e2h1h2l1 overlaps erange -100 20 k -0.48 0.48 / 24 kphi 45 zres 0.25 targetenergy -35 " \
		       "neig 20 split 0.01 llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_qw_2 char legend config fig_colorbar_method=file".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_polar(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs orbitalrgb erange -100 20 k 0 0.5 / 25 kphi 0 90 / 6 zres 0.25 targetenergy -35 neig 20 " \
		       "split 0.01 llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_polar char legend plotstyle spinxy extrema".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_orient(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs orbitalrgb erange -100 50 k 0 0.5 / 25 kphi 0 360 / 24 zres 0.25 targetenergy -35 neig 20 " \
		       "split 0.01 llayer 5 8 5 mlayer HgCdTe 68% HgTe HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_orient char legend orient 30d 111 symmetrytest".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_cartesian(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs jz erange -100 20 kx 0 0.5 / 20 ky 0 0.5 / 20 zres 0.25 targetenergy -35 neig 24 split 0.01 " \
		       "llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_cartesian char legend extrema plotwf separate zero".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_2d_offset(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs jz erange -100 20 kx 0.1 ky 0 0.5 / 20 zres 0.25 targetenergy -35 neig 24 split 0.01 " \
		       "llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_offset char legend extrema dos plotwf separate zero".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_ll_legacy(self, get_cmd = False):
		cmd = "ll"
		args = "8o temp 0.1 zres 0.25 llayer 10 12 10 b 0 1.6 // 16 k 0 mlayer HgCdTe 68% HgTe HgCdTe 68% " \
		       "erange -90 50 targetenergy 10 neig 120 nll 10 msubst CdTe legend char split 0.01 obs llindex " \
		       "out .test_ll_legacy dos cardens 0.002 localdos broadening 0.5 20% lllegacy " \
		       "config plot_transitions_labels=false;fig_ticks_major=more;fig_ticks_minor=normal;fig_unit_format=();plot_dos_units_negexp=true".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_ll_axial(self, get_cmd = False):
		cmd = "ll"
		args = "8o temp 0.1 zres 0.25 llayer 10 12 10 b 0 1.6 // 16 k 0 mlayer HgCdTe 68% HgTe HgCdTe 68% " \
		       "erange -90 50 targetenergy 10 neig 120 nll 10 msubst CdTe legend char split 0.01 obs llindex berry " \
		       "out .test_ll_axial transitions dos cardens 0.002 localdos broadening 0.5 20% " \
		       "config plot_transitions_labels=false;fig_ticks_major=more;fig_ticks_minor=normal;fig_unit_format=();plot_dos_units_negexp=true".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_ll_bia(self, get_cmd = False):
		cmd = "ll"
		args = "8o temp 0.1 zres 0.25 llayer 10 12 10 b 0 1.6 // 16 k 0 mlayer HgCdTe 68% HgTe HgCdTe 68% " \
		       "erange -90 50 targetenergy 10 neig 50 nll 5 bia msubst CdTe legend char split 0.01 obs llavg berry " \
		       "out .test_ll_bia transitions dos cardens 0.002 localdos broadening 0.5 20% " \
		       "config plot_transitions_labels=false;fig_ticks_major=more;fig_ticks_minor=normal;fig_unit_format=();plot_dos_units_negexp=true".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_bulk_ll(self, get_cmd = False):
		cmd = "bulk-ll"
		args = "8o b 0 10 / 100 split 0.01 mater HgTe msubst CdZnTe 4% neig 30 nll 10 " \
		       "out .test_bulk_ll erange -20 20 obs bindex legend".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_bulk(self, get_cmd = False):
		cmd = "bulk"
		args = "8o overlaps obs jz erange -100 20 k 0 0.5 / 50 kphi 45 split 0.01 mater HgTe msubst CdZnTe 4% noax " \
		       "out .test_bulk char legend symmetrize".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_bulk_3d(self, get_cmd = False):
		cmd = "bulk"
		args = "8o obs jz erange -100 20 kx 0 0.15 / 15 ky 0 0.15 / 15 kz 0 0.15 / 15 split 0.01 mater HgTe strain -0.3% noax " \
		       "out .test_bulk_3d char legend extrema dos".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_merge(self, get_cmd = False):
		cmd = "merge"
		# Input files are taken from the test output directory if it exists
		datadir = testdir if os.path.isdir(testdir) else '.'
		args = ["out", ".test_merge", "reconnect", "-25", "2", 'outdir', testdir, "--",
		        os.path.join(datadir, "output.test_2d_qw.xml"), os.path.join(datadir, "output.test_2d_qw_2.xml")]
		# TODO: Run test_2d_qw and test_2d_qw_2 when data files are not found
		return self._run(cmd, args, get_cmd, append_outdir = False)

	def test_compare_2d(self, get_cmd = False):
		cmd = "compare"
		datadir = testdir if os.path.isdir(testdir) else '.'
		args = ["out", ".test_compare_2d", "legend", 'outdir', testdir, "--",
		        os.path.join(datadir, "output.test_2d_qw.xml"), "vs",
		        os.path.join(datadir, "output.test_2d_qw_2.xml")]
		# TODO: Run test_2d_qw and test_2d_qw_2 when data files are not found
		return self._run(cmd, args, get_cmd, append_outdir = False)

	def test_compare_ll(self, get_cmd = False):
		cmd = "compare"
		datadir = testdir if os.path.isdir(testdir) else '.'
		args = ["out", ".test_compare_ll", "legend", "erange", "-90", "-50", 'outdir', testdir, "--",
		        os.path.join(datadir, "output.test_ll_axial.xml"), "vs",
		        os.path.join(datadir, "output.test_ll_bia.xml")]
		# TODO: Run test_ll_axial and test_ll_bia when data files are not found
		return self._run(cmd, args, get_cmd, append_outdir = False)

	def test_2d_selfcon(self, get_cmd = False):
		cmd = "2d"
		args = "8o obs orbitalrgb erange -100 60 k 0 0.5 / 50 kphi 45 zres 0.25 targetenergy -35 neig 20 " \
		       "split 0.01 llayer 5 11 5 mlayer HgCdTe 68% HgMnTe 2.4% HgCdTe 68% msubst CdZnTe 4% noax " \
		       "out .test_2d_selfcon dos dostemp 2 char legend selfcon cardens 0.002 plotqz " \
		       "config selfcon_full_diag=false".split(" ")
		return self._run(cmd, args, get_cmd)

	def test_batch(self, get_cmd = False):
		# Batch run: the 'batch' subcommand spawns a '2d' run per parameter value
		cmd1 = "batch"
		cmd2 = "2d"
		args1 = "@d 7 9 / 2".split(" ")
		args2 = "8o obs jz erange -100 20 kx 0 0.5 / 20 zres 0.25 targetenergy -35 neig 24 split 0.01 " \
		        "llayer 5 @d 5 mlayer HgCdTe 68% HgTe HgCdTe 68% msubst CdZnTe 4% noax " \
		        "out .test_batch.@0of@@ char legend".split(" ")
		args = args1 + ['do', _kdotpy_cmd, cmd2] + args2
		return self._run(cmd1, args, get_cmd)

	def test_1d(self, get_cmd = False):
		cmd = "1d"
		args = "8o axial mlayer HgCdTe 68% HgTe HgCdTe 68% msubst CdZnTe 4% llayer 5 7 5 zres 0.25 w 10 wres 0.5 " \
		       "k 0 0.3 / 6 symmetrize split 0.01 targetenergy -30 neig 30 obs sz out .test_1d bia legend".split(" ")
		return self._run(cmd, args, get_cmd)
+
+
+## If called from the command line
def main():
	"""Command-line entry point for the kdotpy test suite.

	Parses test ids and options from sys.argv[2:] (argv[1] is the kdotpy
	subcommand), runs the selected tests from TestRuns — or prints their
	command lines if 'showcmd' is given — and prints a summary table with
	per-test status and run time.
	"""
	global _showcmd, _verbose

	# ANSI terminal colours; enabled only when stdout is an interactive terminal
	COLOR_DISPLAY = sys.stdout.isatty()
	cred = '\x1b[1;31m' if COLOR_DISPLAY else ''
	cgreen = '\x1b[1;32m' if COLOR_DISPLAY else ''
	cyellow = '\x1b[1;33m' if COLOR_DISPLAY else ''  # currently unused; kept for a complete palette
	cblue = '\x1b[1;34m' if COLOR_DISPLAY else ''
	cpurple = '\x1b[1;35m' if COLOR_DISPLAY else ''
	ccyan = '\x1b[1;36m' if COLOR_DISPLAY else ''
	cwhite = '\x1b[1;37m' if COLOR_DISPLAY else ''
	creset = '\x1b[0m' if COLOR_DISPLAY else ''

	tr = TestRuns()
	# Test ids are the TestRuns method names with the 'test_' prefix stripped
	all_tests = [member[5:] for member in dir(tr) if member.startswith('test')]
	# Change default alphabetical test sort order:
	# Move tests that depend on input files from other runs to the end.
	# Test run order can be changed by order in sys.argv
	independent_tests = [t for t in all_tests if 'compare' not in t and 'merge' not in t]
	dependent_tests = [t for t in all_tests if 'compare' in t or 'merge' in t]
	all_tests = independent_tests + dependent_tests
	del independent_tests, dependent_tests

	invalid_testid = False
	if len(sys.argv) >= 3:
		do_tests = []
		if sys.argv[2].lower() == 'list':
			print("Valid test ids: %s\n" % (", ".join(all_tests)))
			sys.exit(0)
		for a in sys.argv[2:]:
			a = a.replace('-', '_')  # Also allow more comfortable input names with '-' instead of '_'.
			if a in all_tests:
				do_tests.append(a)
			elif a == 'verbose':
				_verbose = True
			elif 'python' in a:
				# Validate an explicit python command, e.g. 'python3.11'
				m = re.search(r"python(3([.][0-9]+)?([.][0-9]+)?)?$", a)
				if m is None:
					sys.stderr.write("ERROR (kdotpy-test.py): '%s' is not a valid Python command. Allowed commands: 'python', 'python3', 'python3.x', 'python3.x.y' (where x, y are numbers).\n" % a)
				# NOTE(review): a valid python command is accepted but not used further
			elif a.startswith('showcmd'):
				_showcmd = True
			else:
				sys.stderr.write("ERROR (kdotpy-test.py): Test id '%s' is not valid.\n" % a)
				invalid_testid = True
		if len(do_tests) == 0:
			if invalid_testid:
				sys.stderr.write("ERROR (kdotpy-test.py): No valid test ids given.\nValid ids: %s\n" % (", ".join(all_tests)))
				sys.exit(2)
			# catch the case that verbose or showcmd option was given without any test id (i.e. run all tests).
			do_tests = all_tests
	else:
		do_tests = all_tests
	print(("# " if _showcmd else "") + "------ kdotpy test suite ------")
	runtimes = []
	stati = []
	t0 = rtime()
	for testid in do_tests:
		testfunc = getattr(tr, 'test_' + testid)
		if _showcmd:
			# Only print the command line; do not execute anything
			print("# Test %s:" % testid)
			print(testfunc(get_cmd = True))
			print()
			continue
		t1 = rtime()
		try:
			print("Starting test %s..." % testid)
			testfunc()
		except AssertionError:
			# Failed test (nonzero exit code); any other exception propagates
			print("%sTest %s%s: %sFailed%s" % (cwhite, cpurple, testid, cred, creset), end = '\n\n')
			stati.append(False)
		else:
			print("%sTest %s%s: %sSuccess%s" % (cwhite, cpurple, testid, cgreen, creset), end = '\n\n')
			stati.append(True)
		runtimes.append(rtime() - t1)

	if _showcmd:
		sys.exit(0)

	if len(runtimes) > 0:
		# summarize test timings:
		# column widths: sized to longest test id, fixed status/runtime columns
		cw = [max(10, len(max(do_tests, key=len)) + 1), 9, 13]
		print('Test'.center(cw[0]) + '|' + 'Status'.center(cw[1]) + '|' + 'Runtime (s)'.center(cw[2]))
		print(('-' * cw[0]) + '+' + ('-' * cw[1]) + '+' + ('-' * cw[2]))
		for i, (testid, status, runtime) in enumerate(zip(do_tests, stati, runtimes)):
			color = cblue if i % 2 else ccyan  # alternate row colours for readability
			print(color + testid.ljust(cw[0]) + creset, end = '|')
			if status:
				print(cgreen + 'success'.center(cw[1]) + creset, end = '|')
			else:
				print(cred + 'failed'.center(cw[1]) + creset, end = '|')
			print(color + ('%.1f' % runtime).rjust(cw[2]) + creset)
		print('-' * (sum(cw) + len(cw) - 1))
		print("Total run time: %ds" % (rtime() - t0))

	if invalid_testid:
		sys.stderr.write("ERROR (kdotpy-test.py): Argument list contains invalid test id.\nValid ids: %s\n" % (", ".join(all_tests)))
+
# Standard script entry point: run the test-suite command-line interface.
if __name__ == '__main__':
	main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/latticetrans.py b/kdotpy-v1.0.0/src/kdotpy/latticetrans.py
new file mode 100644
index 0000000000000000000000000000000000000000..e021338ca2a236c11456ccb06d2637692fe94eda
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/latticetrans.py
@@ -0,0 +1,340 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import numpy.linalg as nplin
+
+### HELPER FUNCTIONS ###
+
def norm_vec(v):
	"""Return normalized vector v / |v|.

	Argument:
	v   One-dimensional numpy array.

	Returns:
	A numpy array of the same shape as v, with Euclidean norm 1.

	Raises:
	ValueError if v is the zero vector (norm is zero).
	"""
	n = np.sqrt(np.dot(v, v))
	if n == 0:
		# Fixed typo in error message: "Divison" -> "Division"
		raise ValueError("Division by zero: Cannot normalize zero vector")
	return v / n
+
def isorthogonal(m, check_det = True):
	"""Test if matrix is orthogonal

	Arguments:
	m          A numpy array of square shape.
	check_det  True or False. If True, return True only if det(m) > 0 and if m
	           is orthogonal. If False, also return True for orthogonal m that
	           do not satisfy the condition det(m) > 0.

	Returns:
	True or False

	Raises:
	TypeError if m is not a numpy array; ValueError if m is not square.
	"""
	if not isinstance(m, np.ndarray):
		raise TypeError("Not an array")
	if m.ndim != 2 or m.shape[0] != m.shape[1]:
		raise ValueError("Not a square array (n x n matrix)")
	mat = np.asarray(m)
	eye = np.asarray(np.identity(mat.shape[0]))
	# Orthogonality: both m m^T and m^T m must equal the identity
	ortho = (np.allclose(mat @ mat.T, eye, rtol = 0, atol = 1e-13)
	         and np.allclose(mat.T @ mat, eye, rtol = 0, atol = 1e-13))
	if check_det:
		return ortho and nplin.det(m) > 0.0
	return ortho
+
def normvec_to_intvec(arr, imax = 100, tol = 1e-9):
	"""Extract integer vector from normalized vector
	If the input vector is parallel to a vector (i, j, k) with integer i, j, k,
	return the smallest possible integer vector (which is (i, j, k) if their
	greatest common divisor is 1.) The algorithm tries multiplying the squares
	of the elements by a common multiplier until all are integer.

	Arguments:
	arr    A numpy array
	imax   Positive integer. More or less the largest integer to check for.
	       More precisely, imax^2 is the largest multiplier of the square
	       elements that the algorithm tries.
	tol    Float. Tolerance; used as the atol argument in numpy.allclose. See
	       documentation of numpy.allclose for more information.

	Returns:
	On success, an array of integers. Otherwise return the original vector.
	"""
	squares = np.asarray(arr)**2
	signs = np.sign(np.asarray(arr))
	for mult in range(1, imax**2 + 1):
		scaled = mult * squares
		if not np.allclose(scaled, np.around(scaled), rtol = 0, atol = tol):
			continue
		candidate = np.sqrt(scaled) * signs
		# Guard against spurious solutions where the squares are integer
		# but the square roots are not
		if np.allclose(candidate, np.around(candidate), rtol = 0, atol = tol):
			return np.around(candidate).astype(int).flatten()
	return np.asarray(arr).flatten()
+
def euler_angles_zxz(m, degrees = False, sing_alpha = False):
	"""Extract Euler angles from transformation matrix.
	Choose z, x, z axes, as with the transformation defined in rotation_zxz().

	Arguments:
	m           A numpy array of shape (3, 3) that defines an orthogonal matrix
	            with det(m) = 1.
	degrees     True or False. If True, return angles in degrees. If False,
	            return angles in radians.
	sing_alpha  True or False. If sin(beta) = 0 (second angle), alpha and gamma
	            are undefined separately, but alpha +/- gamma is defined. In
	            this case, return nonzero alpha and gamma = 0 if sing_alpha is
	            set to True, and nonzero gamma and alpha = 0 otherwise.

	Returns:
	A numpy array of length 3, with the angles (alpha, beta, gamma).

	Raises:
	ValueError if m is not an orthogonal 3x3 matrix with det(m) = +1.
	"""
	if not isorthogonal(m, check_det = True):
		raise ValueError("Not an orthogonal matrix with det = +1.")
	if m.shape != (3, 3):
		raise ValueError("Not a 3x3 array")
	cosbeta = m[2, 2]
	sinbeta = np.sqrt(1.0 - m[2, 2]**2)  # choose positive value, without loss of generality
	if np.abs(sinbeta) < 1e-8:
		# Singular case sin(beta) ~ 0: only the sum/difference of alpha and
		# gamma is determined; attribute it to one of the two angles.
		if sing_alpha:
			cosalpha, sinalpha = m[0, 0], m[0, 1]
			cosgamma, singamma = 1.0, 0.0
		else:
			cosalpha, sinalpha = 1.0, 0.0
			cosgamma, singamma = m[0, 0], -m[1, 0]
	else:
		# Generic case: read alpha and gamma from the last row and column
		cosalpha, sinalpha = -m[2, 1], m[2, 0]
		cosgamma, singamma = m[1, 2], m[0, 2]
	angles = np.array([np.arctan2(sinalpha, cosalpha), np.arccos(cosbeta), np.arctan2(singamma, cosgamma)])
	return angles * 180. / np.pi if degrees else angles
+
def isangle(x):
	"""For orientation elements: Return True if element is an angle (numeric value)"""
	numeric_types = (int, float, np.integer, np.floating)
	return isinstance(x, numeric_types)
+
def isdir(x):
	"""For orientation elements: Return True if element is a direction (3-tuple)"""
	if not isinstance(x, tuple):
		return False
	return len(x) == 3
+
+
+### TRANSFORMATIONS ###
+
def h_transform(h):
	"""Transform to growth direction (1,1,h).
	x = (h,h,2), y = (-1,1,0), z = (1,1,h).
	This is equivalent to growth direction (k, k, l) with h = l/k.

	Argument:
	h    Numeric value.

	Returns:
	A numpy array of shape (3, 3)
	"""
	# Unnormalized axes; rows of the result are the normalized x, y, z axes
	xaxis = np.array([h, h, -2.0])
	yaxis = np.array([-1.0, 1.0, 0.0])
	zaxis = np.array([1.0, 1.0, h])
	rows = [ax / np.sqrt(np.dot(ax, ax)) for ax in (xaxis, yaxis, zaxis)]
	return np.array(rows, dtype = float)
+
def axis_transform(zaxis, yaxis = None, xaxis = None):
	"""Transform towards growth axis (zaxis) and perpendicular axes

	Arguments:
	zaxis     Tuple, list, or array of length 3. The growth axis in lattice
	          coordinates.
	yaxis     None or a tuple, list, or array of length 3. The transversal axis
	          in lattice coordinates. If None, use the default direction for
	          this axis based on the zaxis (if xaxis is None) or the axis
	          orthogonal to the zaxis and xaxis (if xaxis is set).
	xaxis     None or a tuple, list, or array of length 3. The longitudinal axis
	          in lattice coordinates. If None, use the default direction for
	          this axis based on the zaxis (if yaxis is None) or the axis
	          orthogonal to the zaxis and yaxis (if yaxis is set).

	Note:
	The specified axes must be orthogonal. If all three are defined, they must
	define a right-handed coordinate system. If this condition is not satisfied,
	an error is raised.

	Returns:
	A numpy array of shape (3, 3)
	"""
	# Validate arguments and convert to arrays
	if isinstance(zaxis, (tuple, list, np.ndarray)) and len(zaxis) == 3:
		zaxis = np.asarray(zaxis)
	else:
		raise TypeError("zaxis must be a list/tuple/array of length 3")

	if isinstance(yaxis, (tuple, list, np.ndarray)) and len(yaxis) == 3:
		yaxis = np.asarray(yaxis)
	elif yaxis is not None:
		raise TypeError("yaxis must be a list/tuple/array of length 3")
	if isinstance(xaxis, (tuple, list, np.ndarray)) and len(xaxis) == 3:
		xaxis = np.asarray(xaxis)
	elif xaxis is not None:
		raise TypeError("xaxis must be a list/tuple/array of length 3")

	# Fill in unspecified axes so that the three axes are mutually orthogonal
	if yaxis is None and xaxis is None:
		# Default yaxis: in-plane axis perpendicular to zaxis
		if zaxis[0] == 0 and zaxis[1] == 0:
			yaxis = np.array([0, 1, 0])  # np.array([-1, 1, 0]) ?
		else:
			yaxis = np.array([-zaxis[1], zaxis[0], 0])
		xaxis = np.cross(yaxis, zaxis)
	elif xaxis is None:
		xaxis = np.cross(yaxis, zaxis)
	elif yaxis is None:
		yaxis = np.cross(zaxis, xaxis)

	# Rows of the transformation matrix are the normalized x, y, z axes
	rr = np.array([
		norm_vec(xaxis),
		norm_vec(yaxis),
		norm_vec(zaxis)], dtype = float)
	if not isorthogonal(rr):
		raise ValueError("Transformation is not orthogonal or det != 1.")
	return rr
+
def rotation_z(phi, degrees = False):
	"""Rotate around z axis by angle phi.

	Arguments:
	phi      Float. The angle.
	degrees  True or False. If True, interpret phi as value in degrees. If
	         False, interpret phi as value in radians.

	Returns:
	A numpy array of shape (3, 3)
	"""
	angle = phi * np.pi / 180 if degrees else phi
	c, s = np.cos(angle), np.sin(angle)
	return np.array([
		[  c,   s, 0.0],
		[ -s,   c, 0.0],
		[0.0, 0.0, 1.0]], dtype = float)
+
def rotation_zxz(alpha, beta, gamma, degrees = False):
	"""Euler rotation in order 'z, x, z'.

	Arguments:
	alpha    Angle of rotation around c axis.
	beta     Angle of rotation around N axis (lies in xy plane). This is the
	         angle between z axis and x axis.
	gamma    Angle of rotation around z axis.
	degrees  True or False. If True, interpret the angles as values in degrees;
	         if False, in radians.

	Notes:
	The rotations are done in the order 'alpha, beta, gamma'.
	We rotate the coordinate frame (x, y, z) in the lattice frame (a, b, c).
	The first, second, and third rows of the transformation matrix represent
	longitudinal (x), transversal (y), and vertical/growth (z) direction in
	(a, b, c) coordinates, respectively.
	For example, growth direction (111) corresponds to
	  alpha = -45 deg, beta = -arccos(1/sqrt(3)) ~ -55 deg, gamma = 90 deg
	or equivalently
	  alpha = 135 deg, beta = arccos(1/sqrt(3)) ~ 55 deg, gamma = -90 deg.

	Returns:
	A numpy array of shape (3, 3)
	"""
	if degrees:
		alpha, beta, gamma = alpha * np.pi / 180, beta * np.pi / 180, gamma * np.pi / 180
	ca, sa = np.cos(alpha), np.sin(alpha)
	cb, sb = np.cos(beta), np.sin(beta)
	cg, sg = np.cos(gamma), np.sin(gamma)
	# Build the matrix columns-first, then transpose so the rows become the
	# x, y, z axes in lattice coordinates.
	mat = np.array([
		[ ca * cg - cb * sa * sg, -ca * sg - cb * sa * cg,  sb * sa],
		[ sa * cg + cb * ca * sg, -sa * sg + cb * ca * cg, -sb * ca],
		[                sb * sg,                 sb * cg,  cb     ]], dtype = float)
	return mat.T
+
+### WRAPPER FUNCTION ###
+
def lattice_transform(orient, chop_at = 1e-13):
	"""Wrapper function that returns transformation matrix for different orientation patterns.

	Arguments:
	orient     If None, return identity transformation, if numeric (int or
	           float), return rotation around z axis. If a list of length 1, 2,
	           or 3, detect whether the elements are angles (numeric) or
	           directions (3-tuples) and return the appropriate transformation
	           for that pattern. See README, 'orient' argument, for more
	           information on the possible patterns.
	chop_at    Float. Set almost-zero entries (smaller than this number) in
	           transformation matrix to 0.

	Returns:
	A numpy array of shape (3, 3)

	Raises:
	TypeError or ValueError if orient does not match any supported pattern.
	"""
	# Trivial patterns: no orientation, or a single rotation angle about z
	if orient is None:
		return np.identity(3)
	if isinstance(orient, (int, float, np.integer, np.floating)):
		rr = rotation_z(orient, degrees = True)
		rr[np.abs(rr) < chop_at] = 0.0
		return rr
	if not isinstance(orient, list):
		raise TypeError("Argument orient must be None, numeric, or a list of length 1, 2, 3.")
	if not all([x is None or isangle(x) or isdir(x) for x in orient]):
		raise TypeError("The elements of argument orient must be None, numeric, or 3-tuples")

	if len(orient) == 1:
		# One element: z rotation angle, or a longitudinal (x) axis direction
		if isangle(orient[0]):
			rr = rotation_z(orient[0], degrees = True)
		elif isdir(orient[0]):
			rr = axis_transform([0, 0, 1], xaxis = orient[0])
		else:
			raise ValueError("Invalid pattern for argument orient")
	elif len(orient) == 2:
		# Two elements: two Euler angles, angle + growth axis, or two axes
		if isangle(orient[0]) and isangle(orient[1]):
			rr = rotation_zxz(0.0, orient[0], orient[1], degrees = True)
		elif isangle(orient[0]) and isdir(orient[1]):
			rr = rotation_z(orient[0], degrees = True) @ axis_transform(orient[1])
		elif isdir(orient[0]) and isangle(orient[1]):
			rr = rotation_z(orient[1], degrees = True) @ axis_transform(orient[0])
		elif (isdir(orient[0]) or orient[0] is None) and isdir(orient[1]):
			rr = axis_transform(orient[1], xaxis = orient[0])
		else:
			raise ValueError("Invalid pattern for argument orient")
	elif len(orient) == 3:
		# Three elements: full Euler angles, or the x, y, z axis directions
		if isangle(orient[0]) and isangle(orient[1]) and isangle(orient[2]):
			rr = rotation_zxz(*orient, degrees = True)
		elif (isdir(orient[0]) or orient[0] is None) and isdir(orient[1]) and isdir(orient[2]):
			rr = axis_transform(orient[2], yaxis = orient[1], xaxis = orient[0])
		else:
			raise ValueError("Invalid pattern for argument orient")
	else:
		raise ValueError("Invalid pattern for argument orient")

	# Chop almost-zero entries to exactly zero
	rr[np.abs(rr) < chop_at] = 0.0
	return rr
diff --git a/kdotpy-v1.0.0/src/kdotpy/layerstack.py b/kdotpy-v1.0.0/src/kdotpy/layerstack.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b938b9a7e2c687893a5479797351f20b9e73336
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/layerstack.py
@@ -0,0 +1,589 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+from math import ceil
+import numpy as np
+
+from .config import get_config_bool
+from .materials import Material
+from .physconst import hbarm0
+
+
### LAYER NAMES
# Mapping from single-letter layer-type codes to full layer names
# (presumably used to expand abbreviated layer specifications — confirm
# against callers; note 'q' and 'w' both map to 'well').
default_layer_names = {'a': 'automatic', 'b': 'barrier', 'c': 'cap', 'd': 'doping', 'e': 'dielectric', 'q': 'well', 's': 'spacer', 'w': 'well', 'x': 'automatic'}

### MATERIAL PARAMETERS TO CONSIDER
# Parameters that every material dict in a LayerStack must define with a
# numeric value; LayerStack.__init__() aborts if any of them is missing.
layer_parameters = [
	'Ec', 'Ev', 'P', 'F', 'epsilonxx', 'epsilonyy', 'epsilonzz', 'epsilonyz',
	'epsilonxz', 'epsilonxy', 'strain_C1', 'strain_Dd', 'strain_Du',
	'strain_Duprime', 'gamma1', 'gamma2', 'gamma3', 'kappa', 'ge', 'q',
	'exch_yNalpha', 'exch_yNbeta', 'exch_g', 'exch_TK0', 'diel_epsilon',
	'delta_so', 'bia_b8m', 'bia_b8p', 'bia_b7', 'bia_c'
]
+
+### INTERPOLATION FUNCTIONS
+
def interpolate_layer_weight(z, z1, z2, delta_if):
	"""Smooth weight profile of a layer between two interfaces.

	The weight rises from 0 to 1 around z1 and falls back to 0 around z2,
	with a tanh profile of characteristic width delta_if.

	Arguments:
	z         Array of z values at which the weight is evaluated.
	z1        Number. Position of the bottom interface of the layer.
	z2        Number. Position of the top interface of the layer.
	delta_if  Number. Characteristic width of the interface smoothing.

	Returns:
	Array like z with weight values.
	"""
	lower_edge = np.tanh((z - z1) / delta_if)
	upper_edge = np.tanh((z - z2) / delta_if)
	return 0.5 * (lower_edge - upper_edge)
+
def dz_interpolate_layer_weight(z, z1, z2, delta_if):
	"""Analytic z-derivative of interpolate_layer_weight().

	Follows from d/du tanh(u) = 1 - tanh(u)^2; the constant terms of the two
	tanh derivatives cancel, leaving a difference of squared tanh values.
	"""
	t_lower = np.tanh((z - z1) / delta_if)
	t_upper = np.tanh((z - z2) / delta_if)
	return 0.5 * (t_upper**2 - t_lower**2) / delta_if
+
+### OTHER HELPER FUNCTIONS
+
def normalize_layer_weights(lw, by_lw = None):
	"""Normalize layer weights, such that their sum is equal to one.

	Arguments:
	lw     Array of shape (n_layers, n_zpoints). The layer weights of all layers
	       as function of z.
	by_lw  None or array of shape (n_layers, n_zpoints). If None, the
	       denominators at each z value are the sum of the layer weights. If
	       by_lw is set, use this array instead for the denominators.

	Returns:
	Array of the same shape as lw with the normalized layer weights. Columns
	whose denominator is zero are set to zero.
	"""
	lw = np.asarray(lw, dtype = float)
	sum_lw = np.sum(lw if by_lw is None else by_lw, axis = 0)
	# Use np.divide with a 'where' mask so that columns with zero total weight
	# yield 0.0 without ever evaluating x/0 (the previous np.where form still
	# computed lw / sum_lw everywhere, emitting a RuntimeWarning and transient
	# NaN/inf values that were then discarded).
	result = np.zeros_like(lw)
	np.divide(lw, sum_lw, out = result, where = (sum_lw != 0.0))
	return result
+
+### LayerStack CLASS ###
+
+class LayerStack:
+	"""Container class for the layer stack, an ordered list of materials and thicknesses.
+
+	Attributes:
+	matdef_orbitals    6 or 8. Indicates that the values of the Luttinger
+	                   parameters and other band properties are defined with
+	                   respect to the 6- or 8-orbital k.p model, respectively.
+	                   It is initially set to 8, but may be changed to 6 using
+	                   the function remormalize_to(6).
+	bulk               False or True. True indicates that there is translational
+	                   invariance in the z direction. In other words, that the z
+	                   direction is a momentum (not a spatial) direction.
+	nlayer             Integer. Number of layers
+	materials          Tuple of length nlayer. Contains material parameters.
+	thicknesses_z      List of length nlayer. Layer thicknesses in nm.
+	thicknesses_n      List of length nlayer. Layer thicknesses in lattice
+	                   points.
+	zres               Discretization step, i.e., nm per lattice point.
+	names              List of strings, length nlayer + 1. The labels/roles of
+	                   the layers.
+	zinterface_nm      List of length nlayer + 1. The z coordinates of the
+	                   interfaces in nm, starting at 0.0 at the bottom.
+	zinterface         List of length nlayer + 1. The z coordinates of the
+	                   interfaces in lattice points, starting at 0 at the
+	                   bottom.
+	lz_thick           Float. Total thickness in nm.
+	nz                 Integer. Total number of lattice points in z direction.
+	surface_density    Density of carriers in the layer. FOR FUTURE USE.
+
+	Note:
+	Some attributes may be undefined if bulk is True.
+	"""
+	def __init__(self, materialparams, thicknesses, names = None, zres = 0.0):
+		self.matdef_orbitals = 8
+		if thicknesses is None or thicknesses == "bulk":
+			self.bulk = True
+		else:
+			self.bulk = False
+		if isinstance(materialparams, (tuple, list)):
+			if len(materialparams) == 0:
+				sys.stderr.write("ERROR (LayerStack): Argument must at least one material.\n")
+				exit(1)
+			else:
+				for m in materialparams:
+					if not (isinstance(m, dict) and 'material' in m and isinstance(m['material'], Material)):
+						sys.stderr.write("ERROR (LayerStack): Argument must at least be one set of material parameters.\n")
+						exit(1)
+			if self.bulk and len(materialparams) > 1:
+				sys.stderr.write("Warning (LayerStack): In bulk mode, only the first specified material is taken into account.\n")
+				self.materials = (materialparams[0],)
+			else:
+				self.materials = tuple(materialparams)
+		elif isinstance(materialparams, dict) and 'material' in materialparams and isinstance(materialparams['material'], Material):
+			self.materials = (materialparams,)
+		else:
+			sys.stderr.write("ERROR (LayerStack): Argument must at least be one set of material parameters.\n")
+			exit(1)
+
+		# Do some checks
+		for mat in self.materials:
+			missing_param = [param for param in layer_parameters if param not in mat]
+			if len(missing_param) > 0:
+				sys.stderr.write("ERROR (LayerStack): Missing material parameters %s.\n" % ", ".join(missing_param))
+				exit(1)
+			nonnumeric_param = [param for param, val in mat.items() if param in layer_parameters and not isinstance(val, (float, np.floating, int, np.integer))]
+			if len(nonnumeric_param) > 0:
+				undef_var = set()
+				for param in nonnumeric_param:
+					try:
+						undef_var |= set(mat[param].undefined_variables)
+					except:
+						raise
+				sys.stderr.write("ERROR (LayerStack): Non-numeric material parameters %s. Possibly missing for evaluation: %s\n" % (", ".join(nonnumeric_param), ", ".join(list(undef_var))))
+				exit(1)
+
+		if not self.bulk:
+			if isinstance(thicknesses, (tuple, list)):
+				for d in thicknesses:
+					if not isinstance(d, (float, np.floating, int, np.integer)) and d >= 0.0:
+						sys.stderr.write("ERROR (LayerStack): Thicknesses must be numbers >= 0.\n")
+						exit(1)
+				self.thicknesses_z = list(thicknesses)
+			elif isinstance(thicknesses, (float, np.floating, int, np.integer)) and thicknesses >= 0.0:
+				self.thicknesses_z = [thicknesses]
+			else:
+				sys.stderr.write("ERROR (LayerStack): The number of specified layer thicknesses must be equal to the number of layers.\n")
+				exit(1)
+			if len(self.thicknesses_z) != len(self.materials):
+				sys.stderr.write("ERROR (LayerStack): The number of specified layer thicknesses must be equal to the number of layers.\n")
+				exit(1)
+
+		self.nlayer = len(self.materials)
+		if names is None:
+			if self.nlayer == 1:
+				self.names = ["well"]
+			elif self.nlayer == 2:
+				self.names = ["barrier", "well"]
+			elif self.nlayer == 3:
+				self.names = ["barrier_bottom", "well", "barrier_top"]
+			else:
+				self.names = ["layer%i" % (i + 1) for i in range(0, self.nlayer)]
+		elif len(names) != self.nlayer:
+			sys.stderr.write("ERROR (LayerStack): The number of specified layer names must be equal to the number of layers.\n")
+			exit(1)
+		else:
+			self.names = list(names)
+
+		# finite lattice
+		self.zres = zres
+		if zres > 0.0:
+			thicknesses_z_input = np.asarray(self.thicknesses_z)
+			self.thicknesses_n = [int(ceil(d / zres)) for d in self.thicknesses_z]
+			self.thicknesses_z = [zres * dn for dn in self.thicknesses_n]
+			# Commensurability check
+			delta_z = np.abs(np.asarray(thicknesses_z_input) - np.asarray(self.thicknesses_z))
+			if len(delta_z) > 0 and np.amax(delta_z) > 0.99e-3 * zres:
+				if get_config_bool('lattice_zres_strict'):
+					sys.stderr.write("ERROR (LayerStack): Thickness of the layers is not commensurate with the z resolution. Change z resolution or layer thicknesses; or set configuration option 'lattice_zres_strict=false' to ignore this error.\n")
+					exit(1)
+				else:
+					sys.stderr.write("Warning (LayerStack): Thickness of the layers is not commensurate with the z resolution. Layer thicknesses have been changed to (%s) nm.\n" % (", ".join(["%g" % d for d in self.thicknesses_z])))
+		else:
+			self.thicknesses_n = None
+
+		# interface coordinate; in nm and in lattice units
+		if not self.bulk:
+			self.zinterface_nm = [0.0]
+			z = 0.0
+			for d in self.thicknesses_z:
+				z += d
+				self.zinterface_nm.append(z)
+			self.lz_thick = self.zinterface_nm[-1]
+
+		if not self.bulk and zres > 0.0:
+			self.zinterface = [0]
+			pos = 0
+			for dn in self.thicknesses_n:
+				pos += dn
+				self.zinterface.append(pos)
+			self.nz = self.zinterface[-1] + 1
+
+		self.surface_density = None
+
	def layer_index(self, z):
		"""Given coordinate z, return the layer index in which it lies.

		Argument:
		z   Integer, float, or string. The z coordinate. If integer, then treat
		    z as a coordinate in number of lattice points. If float, then treat
		    z as a coordinate in nm. If a string, then return the index of the
		    layer with that name (succeeds only if attributes names is set).

		Returns:
		Integer layer index (0 is the bottom layer) for z in the interior of a
		layer; a half-integer j +/- 0.5 if z lies (within tolerance) exactly on
		an interior interface; None if z is out of range or the name is not
		found.
		"""
		if self.bulk:
			# Bulk mode has a single (the first) material.
			return 0
		elif isinstance(z, (float, np.floating)):
			# z in nm; positions within 1e-3 nm count as coinciding.
			if z < 0.0 or z > self.lz_thick:
				return None
			if abs(z) < 1e-3:
				return 0
			if abs(self.lz_thick - z) < 1e-3:
				return self.nlayer - 1
			for j in range(0, self.nlayer):
				z1 = self.zinterface_nm[j]
				z2 = self.zinterface_nm[j + 1]
				if abs(z - z1) < 1e-3:
					return j - 0.5
				elif abs(z - z2) < 1e-3:
					return j + 0.5
				elif z1 < z < z2:
					return j
		elif isinstance(z, (int, np.integer)):
			# z as lattice-point index; requires a positive z resolution.
			if self.zres <= 0.0:
				sys.stderr.write("ERROR (layer_index): Discretization is not possible for non-positive resolution\n")
				exit(1)

			# NOTE(review): self.nz equals zinterface[-1] + 1, so the checks
			# against self.nz below are asymmetric with the float branch
			# (which uses lz_thick itself); z == zinterface[-1] falls through
			# to the loop and yields nlayer - 0.5 — confirm this is intended.
			if z < 0 or z > self.nz:
				return None
			if z == 0:
				return 0
			if z == self.nz:
				return self.nlayer - 1
			for j in range(0, self.nlayer):
				z1 = self.zinterface[j]
				z2 = self.zinterface[j + 1]
				if z == z1:
					return j - 0.5
				elif z == z2:
					return j + 0.5
				elif z1 < z < z2:
					return j
		elif isinstance(z, str):
			# Look up the layer by name; first match wins.
			if self.names is None or len(self.names) != self.nlayer:
				sys.stderr.write("Warning (layer_index): Layer names are not specified\n")
				return None
			for j in range(0, self.nlayer):
				if self.names[j] == z:
					return j
			return None
		else:
			sys.stderr.write("ERROR (layer_index): Argument must be a number (floating point in nm, integer in number of lattice points) or a string (its name)\n")
			exit(1)
+
+	def mparam_layer(self, j):
+		"""Get material parameters for layer with index j."""
+		if j < 0 or j >= self.nlayer:
+			raise IndexError("Layer index out of range")
+		return self.materials[j]
+
+	def get_strain_matrix(self, j, transform = None):
+		if not isinstance(j, (int, np.integer)):
+			raise TypeError("Layer index must be integer")
+		if j < 0 or j >= self.nlayer:
+			raise IndexError("Layer index out of range")
+		param_mat = self.materials[j]
+		if not ('epsilonxx' in param_mat and 'epsilonyy' in param_mat and 'epsilonzz' in param_mat):
+			return None
+		epsilon_matrix = np.diag([param_mat['epsilon' + co] for co in ['xx', 'yy', 'zz']])
+		if 'epsilonxy' in param_mat:
+			epsilon_matrix[0, 1] = param_mat['epsilonxy']
+			epsilon_matrix[1, 0] = param_mat['epsilonxy']
+		if 'epsilonxz' in param_mat:
+			epsilon_matrix[0, 2] = param_mat['epsilonxz']
+			epsilon_matrix[2, 0] = param_mat['epsilonxz']
+		if 'epsilonyz' in param_mat:
+			epsilon_matrix[1, 2] = param_mat['epsilonyz']
+			epsilon_matrix[2, 1] = param_mat['epsilonyz']
+		if isinstance(transform, np.ndarray):
+			if not transform.shape == (3, 3):
+				raise ValueError("Transformation matrix must be 3x3.")
+			return np.asarray(transform @ (epsilon_matrix @ transform.T))
+		elif transform is None:
+			return np.asarray(epsilon_matrix)
+		else:
+			raise TypeError("Argument 'transform' must be a numpy array of shape (3, 3) or None.")
+
+	def __getitem__(self, j):
+		"""Get properties of layer with index j.
+
+		Returns:
+		material          A dict with material parameters.
+		(zmin, dz, zmax)  A 3-tuple with the z coordinate in nm of the bottom
+		                  interface, the thickness, and the coordinate of the
+		                  top interface.
+		name              String with the layer name/label. None if attribute
+		                  names is not set.
+		"""
+		if not isinstance(j, (int, np.integer)):
+			raise IndexError("Index must be an integer.")
+		if j < 0 or j >= self.nlayer:
+			raise IndexError("Index out of range.")
+		z = (self.zinterface_nm[j], self.thicknesses_z[j], self.zinterface_nm[j + 1])
+		try:
+			name = self.names[j]
+		except:
+			name = None
+		return self.materials[j], z, name
+
+	def renormalize_to(self, target_orbitals):
+		"""Renormalize band parameters to different number of orbitals.
+		If one changes from an 8-orbital to 6-orbital k.p model, then some band
+		parameters need to be changed to different values ('renormalized') to
+		preserve the behaviour of the dispersion (in particular the band mass)
+		near k = 0.
+
+		Argument:
+		target_orbitals   6 or 8. Renormalize to this number of orbitals. If
+		                  this number is the same as the attribute
+		                  matdef_orbitals, then nothing is done.
+
+		Returns:
+		self
+		"""
+		if target_orbitals == self.matdef_orbitals:
+			return self
+		for m in self.materials:
+			if target_orbitals == 6 and self.matdef_orbitals >= 8:
+				Ep = m['P']**2 / hbarm0
+				m['F'] += Ep / (m['Ec'] - m['Ev'] + m['delta_so']) / 6.
+				m['ge'] += Ep / (m['Ec'] - m['Ev'] + m['delta_so']) * 2. / 3.
+		print("Renormalization %i orbitals -> %i orbitals" % (self.matdef_orbitals, target_orbitals))
+		self.matdef_orbitals = target_orbitals
+		return self
+
+	def set_density(self, densities, surface = True):
+		"""FOR FUTURE USE - Set surface density in each layer
+
+		Arguments:
+		densities   List or array of length nlayer containing density values.
+		surface     True or False. True if the values are surface densities
+		            (unit 1/nm^2). False if the values are bulk/volume densities
+		            (unit 1/nm^3).
+		"""
+		if len(densities) != self.nlayer:
+			raise ValueError("Number of density values must equal the number of layers.")
+		if surface:
+			self.surface_density = [float(d) for d in densities]
+		else:  # input is volume density
+			self.surface_density = [float(dens * th) for dens, th in zip(densities, self.thicknesses_z)]
+
	def get_density(self, z, nm = False, extend = True):
		"""FOR FUTURE USE - Get surface density as function of z.

		Arguments:
		z       Number or an array. The z coordinates where the density is
		        evaluated. (NOTE(review): the boolean-mask assignments below
		        assume array-like z; confirm that scalar input works as
		        intended before relying on it.)
		nm      True or False. If True, treat the values of z as coordinates in
		        nm. If False, treat these values as coordinates in lattice
		        points.
		extend  True or False. If True (default), the bottom and top interface
		        of the whole layer stack are treated as being at -infinity and
		        +infinity, respectively. (In practice, huge numbers.) This helps
		        to prevent artifacts in the density at these locations.

		Returns:
		dens   Number or an array (like argument z), containing volume densities
		       (as function of z). Points exactly at an interface (within 1e-6)
		       get half the volume density from each adjacent layer.
		"""
		if self.surface_density is None:
			return np.zeros_like(z)
		z = 1. * z  # force float

		if nm:  # length units in nm
			interface = [zz for zz in self.zinterface_nm]   # force copy
		else:   # length in lattice units
			interface = [zz for zz in self.zinterface]   # force copy

		if extend:   # if True, bottom and top surfaces are not treated as interfaces
			interface[0] = -1e10   # a huge number
			interface[-1] = 1e10   # a huge number

		# Volume density of each layer is its surface density divided by its
		# thickness in nm; layers thinner than 1e-9 nm are skipped to avoid
		# division by (almost) zero.
		# NOTE(review): thicknesses_z is in nm even when nm = False (lattice
		# units); confirm the intended unit of the returned density then.
		dens = np.zeros_like(z)
		for j in range(0, self.nlayer):
			if self.thicknesses_z[j] < 1e-9:
				continue
			vdens = self.surface_density[j] / self.thicknesses_z[j]
			dens[(z > interface[j] + 1e-6) & (z < interface[j+1] - 1e-6)] += vdens  # bulk
			dens[(z >= interface[j] - 1e-6) & (z <= interface[j] + 1e-6)] += 0.5 * vdens  # bottom edge
			dens[(z >= interface[j+1] - 1e-6) & (z <= interface[j+1] + 1e-6)] += 0.5 * vdens  # top edge
		return dens
+
	def make_param_cache(self, z, dz = 0.0, delta_if = None, nm = False, extend = True):
		"""Make parameter cache: Calculate parameter values as function of the
		z position in the layer stack. Performs appropriate interpolation.

		Arguments:
		z         The z values at which to calculate the parameters; should be
		          a numpy array of numbers.
		dz        The "delta" of the discrete derivative; if 0, use the exact
		          derivative.
		delta_if  Characteristic width of the interpolation function; if 0, the
		          interfaces are sharp.
		nm        If set to True, then treat all distances (three arguments
		          above) as lengths in nm. If set to False (default), then the
		          distances are in lattice units.
		extend    If set to True (default), treat the first and last layer as
		          being extended to infinity. This eliminates boundary effects
		          with the "vacuum" (all parameters = 0). If the bottom and top
		          interfaces should be treated as interfaces with the vacuum,
		          set to False.

		Note:
		Calling this function in order to obtain the parameters is not
		preferred. Use param_z() instead, which will call make_param_cache() if
		necessary.

		Returns:
		A dict instance. The keys are strings labelling the variables. The
		values are numbers or arrays with the parameter values as function of z.
		"""
		if not isinstance(z, np.ndarray):
			raise ValueError("Argument z must be a numpy array")

		z = 1. * z  # force float

		if nm:  # length units in nm
			interface = [zz for zz in self.zinterface_nm]   # force copy
		else:   # length in lattice units
			interface = [zz for zz in self.zinterface]   # force copy

		if extend:   # if True, bottom and top surfaces are not treated as interfaces
			interface[0] = -1e10   # a huge number
			interface[-1] = 1e10   # a huge number

		ones = np.ones_like(z)
		zeros = np.zeros_like(z)

		## Calculate layer weights
		# Sharp interfaces: a point within 1e-3 of an interface belongs to both
		# adjacent layers (weight 1 each); normalization below turns that into
		# 0.5 / 0.5 averaging at the interface.
		if delta_if is None or delta_if == 0.0:  # no interpolation
			layer_weights = np.array([np.where((z >= (interface[j] - 1e-3)) & (z <= (interface[j+1] + 1e-3)), ones, zeros) for j in range(0, self.nlayer)])
			delta_if = 0.0
		elif delta_if > 0.0:
			layer_weights = np.array([interpolate_layer_weight(z, interface[j], interface[j + 1], delta_if) for j in range(0, self.nlayer)])
		else:
			raise ValueError("Argument delta_if should be either a positive number, or 0 or None.")

		norm_layer_weights = normalize_layer_weights(layer_weights)

		## Calculate derivatives
		if delta_if == 0.0:
			# Sharp interfaces: derivative of the (piecewise constant)
			# normalized weights is taken to be zero everywhere.
			dz_norm_layer_weights = np.array([zeros for j in range(0, self.nlayer)])
		elif dz == 0.0:
			# Exact derivative of lw / sum(lw) via the quotient rule.
			# NOTE(review): the division by self.zres below fails for
			# zres == 0; this path presumably requires a discretized stack.
			dz_layer_weights = np.array([dz_interpolate_layer_weight(z, interface[j], interface[j + 1], delta_if) for j in range(0, self.nlayer)])  # exact derivative
			sum_layer_weights = np.sum(layer_weights, axis = 0)
			sum_dz_layer_weights = np.sum(dz_layer_weights, axis = 0)
			dz_norm_layer_weights = np.where(sum_layer_weights == 0.0, np.zeros_like(dz_layer_weights), (dz_layer_weights * sum_layer_weights - layer_weights * sum_dz_layer_weights) / sum_layer_weights**2)
			dz_norm_layer_weights /= self.zres  # transform from lattice index coordinate to length coordinates
		elif dz > 0.0:
			# Symmetric finite difference of the normalized weights.
			layer_weights_p = np.array([interpolate_layer_weight(z + dz, interface[j], interface[j + 1], delta_if) for j in range(0, self.nlayer)])
			layer_weights_m = np.array([interpolate_layer_weight(z - dz, interface[j], interface[j + 1], delta_if) for j in range(0, self.nlayer)])
			norm_layer_weights_p = normalize_layer_weights(layer_weights_p)
			norm_layer_weights_m = normalize_layer_weights(layer_weights_m)
			dz_norm_layer_weights = (norm_layer_weights_p - norm_layer_weights_m) / (2 * dz * self.zres)
		else:
			raise ValueError("Argument dz should be a positive number or 0.")

		# Cached parameters for observables: weighted average of the per-layer
		# values (weights have shape (nlayer, len(z)) for one-dimensional z).
		cache_param = {}
		for v in layer_parameters:
			q_i = np.array([mat[v] for mat in self.materials])
			v_z = q_i[:, np.newaxis] * norm_layer_weights
			cache_param[v] = np.sum(v_z, axis = 0)

		# Cached parameters for 2 F + 1
		cache_param['2Fplus1'] = 2.0 * cache_param['F'] + 1.0

		# Cached parameters for derivatives: per-layer values weighted by the
		# derivative of the normalized weights.
		for v in ['F', 'gamma1', 'gamma2', 'gamma3', 'kappa']:
			q_i = np.array([mat[v] for mat in self.materials])
			dv_z = q_i[:, np.newaxis] * dz_norm_layer_weights
			cache_param['dz' + v] = np.sum(dv_z, axis = 0)

		# Cached parameters for strain (a, b, C, d)
		cache_param['as'] = cache_param['strain_Dd']
		cache_param['bs'] = -(2 / 3) * cache_param['strain_Du']
		cache_param['cs'] = cache_param['strain_C1']
		cache_param['ds'] = -(2 / np.sqrt(3)) * cache_param['strain_Duprime']

		return cache_param
+
+	def param_z(self, z, dz = 0.0, delta_if = None, nm = False, extend = True):
+		"""Get parameters as function of z.
+		Performs appropriate interpolation. This function calls
+		make_param_cache() if necessary. This is the preferred function for
+		getting the parameters.
+
+		Arguments:
+		z         The z values at which to calculate the parameters; should be
+		          a numpy array of numbers.
+		dz        The "delta" of the discrete derivative; if 0, use the exact
+		          derivative.
+		delta_if  Characteristic width of the interpolation function; if 0, the
+		          interfaces are sharp.
+		nm        If set to True, then treat all distances (three arguments
+		          above) as lengths in nm. If set to False (default), then the
+		          distances are in lattice units.
+		extend    If set to True (default), treat the first and last layer as
+		          being extended to infinity. This eliminates boundary effects
+		          with the "vacuum" (all parameters = 0). If the bottom and top
+		          interfaces should be treated as interfaces with the vacuum,
+		          set to False.
+
+		Performance warning:
+		It is quite inefficient to call this function for single numbers
+		repeatedly. If you need to iterate, use an array instead.
+
+		Returns:
+		A dict instance. The keys are strings labelling the variables. The
+		values are numbers or arrays with the parameter values as function of z.
+		"""
+		# If z is an array, just call make_param_cache
+		if isinstance(z, np.ndarray):
+			return self.make_param_cache(z, dz, delta_if, nm, extend)
+		elif isinstance(z, (float, np.floating, int, np.integer)):
+			# Quite inefficient for single numbers, but this function should not really be called very frequently
+			cache_param = self.make_param_cache(np.array([z]), dz, delta_if, nm, extend)
+			param = {}
+			for v in cache_param:
+				param[v] = cache_param[v][0]
+			return param
+		else:
+			raise ValueError("Argument z must be array or number.")
+
+	def has_exchange(self):
+		"""Determine whether any layer in the layer stack has nonzero exchange coupling"""
+		for m in self.materials:
+			if (abs(m['exch_yNalpha']) > 1e-10 or abs(m['exch_yNbeta']) > 1e-10) and m['exch_g'] != 0.0:
+				return True
+		return False
diff --git a/kdotpy-v1.0.0/src/kdotpy/lltools.py b/kdotpy-v1.0.0/src/kdotpy/lltools.py
new file mode 100644
index 0000000000000000000000000000000000000000..1551c56c42c21bc50b2dee11c0fd903aff9a3002
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/lltools.py
@@ -0,0 +1,153 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+
+from .momentum import Vector
+
def delta_n_ll(norb, sign_magn = 1):
	"""Return the offsets of the LL indices (+/- offset of Jz) for the basis states

	Arguments:
	norb       6 or 8. Number of orbitals.
	sign_magn  Numeric or Vector (only z component evaluated). The (sign of
	           the) magnetic field. Only whether this value is >= 0 or < 0
	           matters.

	Returns:
	Array of length norb with the offsets.
	"""
	if isinstance(sign_magn, Vector):
		sign_magn = sign_magn.z()
	offsets_pos = np.array([0, 1, -1, 0, 1, 2, 0, 1])
	offsets_neg = np.array([1, 0, 2, 1, 0, -1, 1, 0])
	offsets = offsets_pos if sign_magn >= 0 else offsets_neg
	return offsets[:norb]
+
def whichbands_ll(nll, norb, sign_magn = 1):
	"""Return the indices of the nonzero components of the basis given a Landau level index

	Arguments:
	nll        Integer. The LL index.
	norb       6 or 8. Number of orbitals.
	sign_magn  Numeric or Vector (only z component evaluated). The (sign of
	           the) magnetic field. Only whether this value is >= 0 or < 0
	           matters.

	Returns:
	Array with the basis indices (integers from 0 to norb - 1) for which
	nll + offset is a valid (nonnegative) oscillator index. Its length is at
	most norb.
	"""
	# Delegate to delta_n_ll() instead of duplicating its offset tables; it
	# also performs the Vector-to-sign conversion.
	delta_n_vec = delta_n_ll(norb, sign_magn)
	return np.arange(0, norb, dtype = int)[nll + delta_n_vec >= 0]
+
def scaleup_eivec(eivecs, params, neig, nll, sign_magn = 1):
	"""Make eigenvectors norb * nz long, by inserting zeros at the appropriate places
	This function extends the smaller Hilbert space (fewer orbitals) of
	low-index LL level states to the full set of orbitals (# = norbitals)

	Arguments:
	eivecs     Array, 2-dimensional. The eigenvectors.
	params     PhysParams instance. Used for nz and norbitals.
	neig       Integer. The number of eigenvectors in the input.
	nll        Integer. The LL index.
	sign_magn  Numeric or Vector (only z component evaluated).
			   The (sign of the) magnetic field. It only matters
	           whether this number is positive (>= 0) or negative.

	Returns:
	New array of eigenvectors, in the extended Hilbert space, with shape
	(neig, norb * nz). If nll > 0, the input already lives in the full
	Hilbert space and is returned (possibly transposed) as is.
	"""
	nz = params.nz
	norb = params.norbitals
	if nll > 0:
		# For nll > 0, all orbitals carry a valid oscillator index, so the
		# vectors already have full size; only normalize the orientation.
		if eivecs.shape[0] == norb * nz:
			eivecs = eivecs.transpose()
		if eivecs.shape[1] != norb * nz:
			raise ValueError("Invalid vector size")
		return eivecs

	# For nll <= 0, only a subset of the orbitals is present in the input.
	whichbands = whichbands_ll(nll, norb, sign_magn)
	nbands = len(whichbands)

	# Orient the input as (neig, nbands * nz).
	if eivecs.shape[0] == nbands * nz:
		eivecs = eivecs.transpose()
	if eivecs.shape[1] != nbands * nz:
		raise ValueError("Invalid vector size")

	# Scatter the reduced components into the full-size vectors: for each z
	# point, the nbands present orbitals go to their positions among norb.
	eivecs1 = np.zeros((neig, norb * nz), dtype = complex)
	indices = norb * np.repeat(np.arange(0, nz), nbands) + np.tile(np.asarray(whichbands), nz)
	xx, yy = np.meshgrid(np.arange(0, neig), indices)
	eivecs1[xx.T, yy.T] = eivecs

	return eivecs1
+
def scaleup_full_eivec(eivecs, params, neig, ll_max, sign_magn):
	"""Make eigenvectors appropriately long, by inserting zeros at the appropriate places, version for full LL mode.
	This function extends the smaller Hilbert space (fewer orbitals) of
	low-index LL level states to the full set of orbitals (# = norbitals)

	Arguments:
	eivecs     Array, 2-dimensional. The eigenvectors over all LL indices
	           from -2 to ll_max (concatenated per-LL blocks).
	params     PhysParams instance. Used for nz and norbitals.
	neig       Integer. The number of eigenvectors in the input.
	ll_max     Integer. The maximum LL index.
	sign_magn  Numeric or Vector (only z component evaluated).
			   The (sign of the) magnetic field. It only matters
	           whether this number is positive (>= 0) or negative.

	Returns:
	New array of eigenvectors, in the extended Hilbert space.
	"""
	delta_n_vec = delta_n_ll(params.norbitals, sign_magn)
	nz = params.nz
	# Per-LL block size: number of orbitals with a valid oscillator index
	# (nll + offset >= 0) times the number of z points; LL indices run from
	# -2 to ll_max inclusive.
	sizes = nz * np.array([np.count_nonzero(n + delta_n_vec >= 0) for n in range(-2, ll_max + 1)])
	# Cumulative offsets of the per-LL blocks within the concatenated input.
	indices = np.concatenate(([0], np.cumsum(sizes)))
	fullsize = indices[-1]
	if eivecs.shape[0] == fullsize:
		eivecs = eivecs.transpose()
	if eivecs.shape[1] != fullsize:
		sys.stderr.write("ERROR (ScaleUp_Full_Eivec): Invalid vector size\n")
		exit(1)

	# Scale up each LL block separately and join them side by side.
	alleivecs = []
	for nll in range(-2, ll_max + 1):
		eivecs1 = eivecs[:, indices[nll + 2]:indices[nll + 3]]
		alleivecs.append(scaleup_eivec(eivecs1, params, neig, nll, sign_magn))
	return np.hstack(alleivecs)
diff --git a/kdotpy-v1.0.0/src/kdotpy/main.py b/kdotpy-v1.0.0/src/kdotpy/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..20dc9adbe5cfb07083e304e23cf843ae187f7c6e
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/main.py
@@ -0,0 +1,105 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+import importlib
+from .doc import doc as kdotpy_doc
+from .version import get_version
+
+# License banner printed to stdout before the computational subcommands run
+# (see main(); suppressed for 'kdotpy test' and 'kdotpy config').
+copyright_message = """kdotpy - Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+This program is licensed under the GNU General Public License, version 3.
+Please view LICENSE, LICENSE.additional, README.md, and CITATION.md for more
+information.\n\n"""
+
+try:
+	sourcedir = __path__
+except NameError:
+	sourcedir = os.path.dirname(os.path.realpath(__file__))
+	
+def help():
+	"""Show the help file using 'less', optionally with a search pattern.
+
+	Any further command-line arguments (sys.argv[2:]) are joined into a
+	case-insensitive search pattern passed to 'less' via --pattern.
+	Note: os.execvp() replaces the current process and does not return.
+	"""
+	helpfile = os.path.join(sourcedir, 'docs', 'helpfile.txt')
+	if len(sys.argv) > 2:
+		cmdargs = ['less', '-i', '--pattern=' + " ".join(sys.argv[2:]), helpfile]
+	else:
+		cmdargs = ['less', helpfile]
+	os.execvp(cmdargs[0], cmdargs)
+
+def doc():
+	"""Show documentation for the single topic given as sys.argv[2].
+
+	Exits with status 3 if 'kdotpy doc' is not followed by exactly one
+	argument.
+	"""
+	if len(sys.argv) != 3:
+		sys.stderr.write("ERROR (kdotpy): kdotpy doc must be followed by one argument\n")
+		sys.exit(3)
+	kdotpy_doc(sys.argv[2])
+
+def version():
+	"""Print the kdotpy version string to stdout."""
+	print(get_version())
+
+def run(cmd, *args):
+	"""Import the submodule kdotpy.kdotpy-<cmd> and invoke its main() function.
+
+	The extra positional arguments are accepted but not used here; the
+	subcommand scripts read their arguments from sys.argv directly.
+	"""
+	scriptmodule = f'kdotpy.kdotpy-{cmd}'
+	script = importlib.import_module(scriptmodule)
+	script.main()
+
+def main():
+	"""Entry point: dispatch on the first command-line argument.
+
+	Exits with status 1 on an unsupported Python version and with status 3 on
+	a missing or invalid subcommand.
+	"""
+	if sys.version_info < (3, 9):
+		sys.stderr.write("ERROR (kdotpy): Python version 3.9 or higher is required.\n")
+		sys.exit(1)
+	if len(sys.argv) <= 1:
+		sys.stderr.write("ERROR (kdotpy): Missing argument\n")
+		sys.exit(3)
+	
+	if sys.argv[1] in ["help", "--help"]:
+		help()
+	elif sys.argv[1] in ["doc", "--doc"]:
+		doc()
+	elif sys.argv[1] in ["version", "--version"]:
+		version()
+	elif sys.argv[1] in ['1d', '2d', 'bulk', 'll', 'bulk-ll', 'merge', 'compare', 'batch']:
+		sys.stdout.write(copyright_message)
+		run(*sys.argv[1:])
+	elif sys.argv[1] in ['test', 'config']:
+		# No copyright message for kdotpy test and kdotpy config
+		run(*sys.argv[1:])
+	else:
+		sys.stderr.write("ERROR (kdotpy): Invalid script name\n")
+		sys.exit(3)
+
+if __name__ == "__main__":
+	main()
+	sys.exit(0)
diff --git a/kdotpy-v1.0.0/src/kdotpy/materials/__init__.py b/kdotpy-v1.0.0/src/kdotpy/materials/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5414ce434f5a7907c75e12335c649a2bc2bbfdb4
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/materials/__init__.py
@@ -0,0 +1,43 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from .materials import Material, MaterialsList
+from .materials import material_parameters_tex, material_parameters_units, formula_to_compound
+from .initial import initialize_materials, allMaterials
diff --git a/kdotpy-v1.0.0/src/kdotpy/materials/base.py b/kdotpy-v1.0.0/src/kdotpy/materials/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbacd99b07b8b051b373452144d7d40e872be51f
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/materials/base.py
@@ -0,0 +1,440 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import re
+import keyword
+import ast
+import operator
+import math
+import copy
+from .. import physconst
+
+### PREDEFINED FUNCTIONS ###
+def linint(a, b, x):
+	"""The function given by f(x) = a (1 - x) + b x"""
+	return a * (1 - x) + b * x
+
+def linearpoly(c0, c1, x):
+	"""The function given by f(x) = c0 + c1 x"""
+	return c0 + c1 * x
+
+def quadrpoly(c0, c1, c2, x):
+	"""The function given by f(x) = c0 + c1 x + c2 x^2"""
+	return c0 + c1 * x + c2 * x**2
+
+def cubicpoly(c0, c1, c2, c3, x):
+	"""The function given by f(x) = c0 + c1 x + c2 x^2 + c3 x^3"""
+	return c0 + c1 * x + c2 * x**2 + c3 * x**3
+
+def poly(*args):
+	"""The function given by f(x) = c0 + c1 x + ... + ck x^k"""
+	*coeff, x = args
+	return sum(c * x ** k for k, c in enumerate(coeff))
+
+### AST OBJECTS ###
+
+# The AST parser is adapted from:
+# https://stackoverflow.com/questions/15197673/using-pythons-eval-vs-ast-literal-eval
+
+# Names of the callable functions in the math module (sin, sqrt, ...), which
+# are allowed as function calls in parsed parameter expressions.
+mathfunc = [u for u in dir(math) if "__" not in u and callable(getattr(math, u))]
+# Names of the non-callable constants defined in the physconst module.
+phconst = [u for u in dir(physconst) if "__" not in u and not callable(getattr(physconst, u))]
+polyfunc = {
+	'linint': linint,
+	'linearpoly': linearpoly,
+	'quadrpoly': quadrpoly,
+	'cubicpoly': cubicpoly,
+	'poly': poly,
+}  # Polynomial functions, defined above. Call as poly(c0, ..., ck, x).
+
+boolfunc = {
+	'geq': operator.ge,
+	'leq': operator.le,
+	'gtr': operator.gt,
+	'less': operator.lt
+}  # Boolean comparison functions for >=, <=, >, <. Call as fn(a, b) or fn(a, 0).
+
+# AST binary-operator nodes mapped to their numerical implementations.
+binary_operators = {
+	ast.Add: operator.add,
+	ast.Sub: operator.sub,
+	ast.Mult: operator.mul,
+	ast.Div: operator.truediv,
+	ast.Pow: operator.pow,
+}
+
+# AST unary-operator nodes (unary minus and plus) mapped to implementations.
+unary_operators = {
+	ast.USub: operator.neg,
+	ast.UAdd: operator.pos,
+}
+
+class AstParameter:
+	"""Stores an abstract syntax tree (AST) from a string or an AST expression
+	This class parses a restricted subset of Python code, i.e., only
+	mathematical functions acting on numerical values. The purpose is to store
+	material parameter that depend on other material parameters, or on variables
+	such as x (concentration of an element) and/or T (temperature).
+	
+	Attributes:
+	value      Cached evaluated value.
+	raw        String. The string expression. If the instance is initialized
+	           from an AST expression, use the result of ast.unparse().
+	tree       AST expression. If the instance is initialized from a string, use
+	           the result of ast.parse().
+	verbose    True or False. If True, print debug information to stdout.
+	variables  A dict instance. The cached variable values used for evaluation
+	           or substitution.
+	undefined_variables
+	           Set. Variable ids in the instance that have not been substituted
+	           in the last evaluation.
+	"""
+	def __init__(self, arg, verbose=False):
+		self.value = None
+		if isinstance(arg, str):
+			self.raw = arg
+			self.tree = ast.parse(arg, mode='eval')
+		elif isinstance(arg, ast.Expression):
+			self.raw = ast.unparse(arg)
+			self.tree = arg
+		else:
+			raise TypeError("Input argument must be a str or an ast.Expression instance")
+		self.variables = {}
+		self.undefined_variables = set()
+		self.verbose = verbose
+		if self.verbose:
+			print(f"ast.parse({arg}):" if isinstance(arg, str) else f"ast.parse({type(arg)}):")
+			print(ast.dump(self.tree, indent=4))
+
+	def evaluate(self, **variables):
+		"""Evaluate the instance by substitution of variables and return None if the result is not numeric"""
+		# Only numeric substitutions are cached; all other variables stay
+		# undefined and evaluate to nan inside _eval_node().
+		self.variables = {var: val for var, val in variables.items() if isinstance(val, (float, int))}
+		self.undefined_variables = set()
+		result = self._eval_node(self.tree)
+		# nan acts as the sentinel for "contains undefined variables", so a
+		# nan result maps to value None.
+		if isinstance(result, tuple):
+			self.value = None if any(math.isnan(e) for e in result) else result
+		elif isinstance(result, (float, int)):
+			self.value = None if math.isnan(result) else result
+		else:
+			raise TypeError("Value must be numeric or a tuple")
+		if self.verbose and len(self.undefined_variables) > 0:
+			# NOTE(review): the message mentions __call__, but this method is named evaluate
+			print("AstParameter.__call__: Undefined variable%s %s" % ("" if len(self.undefined_variables) == 1 else "s", ", ".join(self.undefined_variables)))
+		return self.value
+
+	def substitute(self, **variables):
+		"""Evaluate the instance by substitution of variables and return the instance itself if the result is not numeric"""
+		value = self.evaluate(**variables)
+		return self if value is None else value
+
+	def get_dependencies(self, **variables):
+		"""Get variables that are required to evaluate to a numerical value"""
+		# Evaluate for its side effect of filling self.undefined_variables.
+		self.evaluate(**variables)
+		return self.undefined_variables
+
+	def expand(self):
+		"""Expand to a tuple of AstParameter instances if the present instance is a tuple at its root."""
+		if isinstance(self.tree.body, ast.Tuple):
+			return tuple(AstParameter(ast.Expression(el)) for el in self.tree.body.elts)
+		else:
+			return self
+
+	def __str__(self):
+		"""Return string representation, using ast.unparse()"""
+		return ast.unparse(self.tree)
+
+	def tex(self):
+		"""Very basic conversion of string to TeX"""
+		# TODO: Do it properly. See, for example
+		# https://stackoverflow.com/questions/3867028/converting-a-python-numeric-expression-to-latex
+		s = str(self)
+		s = re.sub(r' [*] ', r' ', s)  # mult
+		s = re.sub(r' [*][*] ([0-9A-Za-z]+)', r'^{\1}', s)  # pow
+		s = re.sub(r' [*][*] \((.+)\)', r'^{\1}', s)  # pow
+		s = re.sub(r'\b(pi|sin|cos|tan|log|exp)\b', r'\\\1', s)  # some common TeX macros
+		return s
+
+	def _eval_node(self, node):
+		"""Recursively evaluate the AST tree by substituting variables"""
+		if isinstance(node, ast.Expression):
+			return self._eval_node(node.body)
+		elif isinstance(node, ast.Constant):
+			return node.value
+		elif isinstance(node, ast.Tuple):
+			return tuple(self._eval_node(e) for e in node.elts)
+		elif isinstance(node, ast.BinOp):
+			left = self._eval_node(node.left)
+			right = self._eval_node(node.right)
+			# Special-case ^ (BitXor) to give a helpful hint about **.
+			if isinstance(node.op, ast.BitXor):
+				raise SyntaxError(f"Undefined binary operator {type(node.op).__name__}. For exponentiation, use **, not ^.")
+			if type(node.op) not in binary_operators:
+				raise SyntaxError(f"Undefined binary operator {type(node.op).__name__}")
+			return binary_operators[type(node.op)](left, right)
+		elif isinstance(node, ast.UnaryOp):
+			operand = self._eval_node(node.operand)
+			if type(node.op) not in unary_operators:
+				raise SyntaxError(f"Undefined unary operator {type(node.op).__name__}")
+			return unary_operators[type(node.op)](operand)
+		elif isinstance(node, ast.Call):
+			args = [self._eval_node(x) for x in node.args]
+			# kwds = {k.arg: self._eval_node(k.value) for k in node.keywords}
+			if isinstance(node.func, ast.Call):
+				raise SyntaxError("Nested function calls are not permitted")
+			elif node.func.id in mathfunc:  # functions in math module
+				fn = getattr(math, node.func.id)
+				return fn(*args)
+			elif node.func.id in polyfunc:
+				fn = polyfunc[node.func.id]
+				return fn(*args)
+			elif node.func.id in boolfunc:
+				fn = boolfunc[node.func.id]
+				# Propagate nan (undefined variables) through comparisons.
+				if len(args) == 1:
+					return math.nan if math.isnan(args[0]) else float(fn(args[0], 0.0))
+				elif len(args) == 2:
+					return math.nan if math.isnan(args[0]) or math.isnan(args[1]) else float(fn(*args))
+				else:
+					raise SyntaxError("Boolean operator function must have 1 or 2 arguments")
+			else:
+				raise SyntaxError("Not a math function")
+		elif isinstance(node, ast.Name):
+			if '__' in node.id:
+				raise SyntaxError(f"Bad variable '{node.id}'. Double-underscore in variable name is not allowed.")
+			elif node.id in self.variables:  # Variables where a value is substituted
+				value = self.variables[node.id]
+				return value if isinstance(value, (float, int)) else math.nan
+			elif node.id.lower() == 'inf':
+				raise ValueError("Value must be finite, not inf or -inf")
+			elif node.id.lower() == 'nan':
+				raise ValueError("Value must be definite, not nan")
+			elif node.id in ['pi', 'e']:  # math constants
+				return getattr(math, node.id)
+			elif node.id in phconst:  # constants in physconst
+				return getattr(physconst, node.id)
+			else:
+				# Unknown variable: record it and yield the nan sentinel.
+				self.undefined_variables.add(node.id)
+				return math.nan
+		else:
+			raise SyntaxError(f"Invalid expression node of type {type(node).__name__}")
+
+	def substitute_variable_names(self, substitutions):
+		"""Substitute all occurrences of variables and return a new instance
+
+		Arguments:
+		substitutions  A dict instance. The keys are the source values to be
+		               replaced, the values the target values. Both must be
+		               strings.
+		"""
+		if not isinstance(substitutions, dict):
+			raise TypeError("Argument substitutions must be a dict instance.")
+		if not all(isinstance(k, str) for k in substitutions.keys()):
+			raise TypeError("The keys of argument substitutions must be strings.")
+		if not all(isinstance(v, str) for v in substitutions.values()):
+			raise TypeError("The values of argument substitutions must be strings.")
+		# Work on a deep copy, so the present instance stays unmodified.
+		new_instance = copy.deepcopy(self)
+		new_instance._subst_var(new_instance.tree, substitutions)
+		return new_instance
+
+	def _subst_var(self, node, subst):
+		"""Recursively substitute variables in ast tree"""
+		if isinstance(node, ast.Expression):
+			self._subst_var(node.body, subst)
+		elif isinstance(node, ast.Constant):
+			pass
+		elif isinstance(node, ast.Tuple):
+			for e in node.elts:
+				self._subst_var(e, subst)
+		elif isinstance(node, ast.BinOp):
+			self._subst_var(node.left, subst)
+			self._subst_var(node.right, subst)
+		elif isinstance(node, ast.UnaryOp):
+			self._subst_var(node.operand, subst)
+		elif isinstance(node, ast.Call):
+			for x in node.args:
+				self._subst_var(x, subst)
+		elif isinstance(node, ast.Name):
+			if node.id in subst:
+				node.id = subst[node.id]
+		else:
+			raise SyntaxError(f"Invalid expression node of type {type(node).__name__}")
+
+def to_ast(x):
+	"""Helper function to convert expression to an AST object"""
+	if isinstance(x, (float, int)):
+		return ast.Constant(x)
+	elif isinstance(x, ast.Expression):
+		return x.body
+	elif isinstance(x, AstParameter):
+		return x.tree.body
+	else:
+		raise TypeError(f"Invalid expression of type {type(x)})")
+
+def to_tuple(x):
+	"""Helper function to convert expression to a tuple"""
+	if isinstance(x, tuple):
+		return x
+	elif isinstance(x, list):
+		return tuple(x)
+	elif isinstance(x, (float, int)):
+		return (x,)
+	elif isinstance(x, ast.Expression):
+		if isinstance(x.body, (ast.Tuple, ast.List)):
+			return tuple(x.body.elts)
+		else:
+			return (x.body,)
+	elif isinstance(x, AstParameter):
+		if isinstance(x.tree.body, (ast.Tuple, ast.List)):
+			return tuple(x.tree.body.elts)
+		else:
+			return (x.tree.body,)
+	else:
+		raise TypeError(f"Invalid expression of type {type(x)}")
+
+def ast_linint(a, b, x, explicit=False):
+	"""Return symbolic linear interpolation between values a and b
+
+	Arguments:
+	a         Number, ast.Expression, AstParameter, or None. First value. If
+	          None, return b.
+	b         Number, ast.Expression, AstParameter, or None. Second value. If
+	          None, return a.
+	x         String. The variable. (Note: Not a numerical value!)
+	explicit  True or False. If True, return an AstParameter that encodes
+	          (1 - x) * a + x * b. If False (default), one that encodes
+	          linint(a, b, x).
+
+	Returns:
+	value     Number, ast.Expression, AstParameter, or None. Generally an
+	          AstParameter encoding either linint(a, b, x) or
+	          (1 - x) * a + x * b. If either a or b is None, it inherits the
+	          type from the other value. If a and b are equal (and of the same
+	          type), return a.
+	"""
+	if a is None:
+		return b
+	elif b is None:
+		return a
+	# Equal values need no interpolation; the type check guards the equality
+	# comparison between potentially incompatible types.
+	if type(a) == type(b) and a == b:
+		return a
+
+	if not isinstance(x, str):
+		raise TypeError("Argument x must be a string.")
+	ast_x = ast.Name(x)
+
+	val1 = to_ast(a)
+	val2 = to_ast(b)
+
+	if explicit:
+		# Build the expression (1 - x) * a + x * b node by node.
+		mult1 = ast.BinOp(ast.Constant(1), ast.Sub(), ast_x)
+		mult2 = ast_x
+		term1 = ast.BinOp(mult1, ast.Mult(), val1)
+		term2 = ast.BinOp(mult2, ast.Mult(), val2)
+		ast_expr = ast.Expression(ast.BinOp(term1, ast.Add(), term2))
+	else:
+		# Encode a call to the predefined function linint(a, b, x).
+		args = [val1, val2, ast.Name(x)]
+		kwds = []
+		ast_expr = ast.Expression(ast.Call(ast.Name('linint'), args, kwds))
+
+	return AstParameter(ast_expr)
+
+def ast_linint_tuple(a, b, x, explicit=False):
+	"""Return symbolic linear interpolation between tuples a and b
+
+	Arguments:
+	see ast_linint()
+
+	Returns:
+	value   Tuple or AstParameter.
+	"""
+	val1 = to_tuple(a)
+	val2 = to_tuple(b)
+	if len(val1) != len(val2):
+		raise ValueError("Tuples a and b must be of same length")
+	if val1 == val2:
+		return a
+	if not isinstance(x, str):
+		raise TypeError("Argument x must be a string.")
+	ast_x = ast.Name(x)
+
+	# Interpolate element-wise, using simplified expressions for the special
+	# values 0 and 1 of the tuple elements.
+	new_elts = []
+	for e1, e2 in zip(val1, val2):
+		if e1 == e2:
+			e = to_ast(e1)
+		elif isinstance(e1, (float, int)) and e1 == 0:
+			# Interpolation from 0 reduces to x * e2 (or just x if e2 == 1).
+			if isinstance(e2, (float, int)) and e2 == 1:
+				e = ast_x
+			else:
+				e = ast.BinOp(ast_x, ast.Mult(), to_ast(e2))
+		elif isinstance(e2, (float, int)) and e2 == 0:
+			# Interpolation to 0 reduces to (1 - x) * e1.
+			mult = ast.BinOp(ast.Constant(1), ast.Sub(), ast_x)  # 1 - x
+			if isinstance(e1, (float, int)) and e1 == 1:
+				e = mult
+			else:
+				e = ast.BinOp(mult, ast.Mult(), to_ast(e1))
+		else:
+			e = to_ast(ast_linint(e1, e2, x, explicit=explicit))
+		new_elts.append(e)
+
+	ast_expr = ast.Expression(ast.Tuple(new_elts))
+	return AstParameter(ast_expr)
+
+### MISCELLANEOUS ###
+def is_valid_parameter(s):
+	"""Test whether parameter name is valid.
+
+	A custom parameter name must start with a letter and contain only the ASCII
+	alphanumeric characters A-Z, a-z, and 0-9, as well as underscore _.
+
+	The following names are reserved, hence invalid:
+	- Python keywords (like for, if, while, etc.); exception: as
+	- Functions in math module
+	- Custom math functions defined in this module (linint, poly, etc.)
+	- Boolean comparison functions geq, leq, gtr, less
+	- Physical constants defined in the physconst module
+	- Math constants pi and e (lowercase only), inf and nan (all cases)
+	- Common variables T, x, y, z
+	"""
+	if not isinstance(s, str):
+		raise TypeError("Argument must be a string")
+	if keyword.iskeyword(s) and s != 'as':
+		return False
+	if s in mathfunc or s in polyfunc or s in boolfunc or s in phconst:
+		return False
+	if s in ["T", "x", "y", "z", "pi", "e"]:
+		return False
+	if s.lower() in ["inf", "nan"]:
+		return False
+	return re.fullmatch("[A-Za-z][A-Za-z0-9_]*", s) is not None
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/materials/default b/kdotpy-v1.0.0/src/kdotpy/materials/default
new file mode 100644
index 0000000000000000000000000000000000000000..2bcec788821acc1d8741c25ddaf145a1a96d524c
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/materials/default
@@ -0,0 +1,176 @@
+# kdotpy material definitions
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## HgTe, mercury telluride
+[HgTe]
+compound    = HgTe
+composition = 1, 1     
+P           = sqrt(18800. * hbarm0)
+Ev          = 0.0
+Ec          = -303.0 + 0.495 * T ** 2 / (11.0 + T)  # Eg of HgCdTe for x = 0
+gamma1      = 4.1
+gamma2      = 0.5
+gamma3      = 1.3
+F           = 0.0
+kappa       = -0.4
+ge          = 2.0
+q           = 0.0
+a           = 0.6462
+strain_C1   = -3.83e3
+strain_Dd   = 0e3
+strain_Du   = 2.25e3
+strain_Duprime = sqrt(0.75) * 2.08e3
+exch_yNalpha   = 0.0
+exch_yNbeta    = 0.0
+diel_epsilon   = 20.8
+delta_so    = 1.08e3
+piezo_e14   = 0.035 * 1e18 * e_el  # from [Fortran]
+bia_c       = -7.4
+bia_b8p     = -106.46
+bia_b8m     = -13.77
+bia_b7      = -100.0
+
+# On piezo_e14:
+# [Fortran] A. Pfeuffer-Jeschke, E. G. Novik et al.
+# See also:
+# [Adachi] S. Adachi, "Properties of Semiconductor Alloys: Group-IV, III-V
+# and II-VI Semiconductors" (book). This Ref. lists 0.029 C / m^2 for HgTe and
+# 0.0335 C / m^2 for CdTe
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## CdTe, cadmium telluride
+[CdTe]
+compound    = CdTe
+composition = 1, 1   
+P           = sqrt(18800. * hbarm0)
+Eg          = 1606.0 - 0.325 * T ** 2 / (78.7 + T)  # Eg of HgCdTe for x = 1
+Eg0         = -303.0 + 0.495 * T ** 2 / (11.0 + T)  # Eg of HgCdTe for x = 0
+Evoff       = -570. * (Eg - Eg0) / (1606 - -303)
+Ev          = Evoff
+Ec          = Evoff + Eg
+gamma1      = 1.47
+gamma2      = -0.28
+gamma3      = 0.03
+F           = -0.09
+kappa       = -1.31
+ge          = 2.0
+q           = 0.0
+a           = 0.6482
+strain_C1   = -4.06e3
+strain_Dd   = -0.7e3
+strain_Du   = 1.755e3
+strain_Duprime = sqrt(0.75) * 3.2e3
+exch_yNalpha   = 0.0
+exch_yNbeta    = 0.0
+diel_epsilon   = 10.2
+delta_so    = 0.91e3
+piezo_e14   = 0.035 * 1e18 * e_el
+bia_c       = -2.34
+bia_b8p     = -224.1
+bia_b8m     = -6.347
+bia_b7      = -204.7
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## HgCdTe (Hg_{1-x} Cd_x Te), mercury cadmium telluride
+[HgCdTe]
+compound    = HgCdTe
+linearmix   = HgTe,CdTe,x 
+composition = 1 - x, x, 1  # is also set automatically by linearmix 
+P           = sqrt(18800. * hbarm0)
+Eg          = -303 * (1 - x) + 1606 * x - 132. * x * (1 - x) + \
+              (0.495 * (1 - x) - 0.325 * x - 0.393 * x * (1 - x)) * T ** 2 / (11.0 * (1 - x) + 78.7 * x + T)  # identical to Ref. [HgCdTe2]
+Eg0         = -303.0 + 0.495 * T ** 2 / (11.0 + T)
+Evoff       = -570. * (Eg - Eg0) / (1606 - -303)
+Ev          = Evoff
+Ec          = Evoff + Eg
+gamma1      = poly( 4.1, -2.8801,  0.3159, -0.0658, x)
+gamma2      = poly( 0.5, -0.7175, -0.0790,  0.0165, x) 
+gamma3      = poly( 1.3, -1.3325,  0.0790, -0.0165, x)
+kappa       = poly(-0.4, -0.8475, -0.0790,  0.0165, x)
+a           = 0.6462 + 0.0009 * x + 0.0017 * x**2 - 0.0006 * x**3
+
+# The special parameter 'linearmix' defines all further material parameters (not
+# specified explicitly here) by linear interpolation between HgTe and CdTe.
+
+# More advanced dependencies of gap value Eg and band offset Evoff:
+# See:
+# [HgCdTe1] Laurenti et al., J. Appl. Phys. 67, 6454 (1990)
+# [HgCdTe2] Becker, Latussek, Pfeuffer-Jeschke, Landwehr, and Molenkamp, Phys. Rev. B 62, 10353 (2000)
+
+# Nonlinear interpolation of gamma1, gamma2, gamma3, kappa:
+# These are approximations to the results from the Pfeuffer-Jeschke program
+# [Fortran], with deviations smaller than 0.0003; these errors are negligible in
+# practice.
+# Details may be found in his thesis [PJ]. However, it is not explained why the
+# band parameters F, G, and H2 are linearly interpolated, while H1 is
+# inverse-linearly interpolated (i.e., H1^-1 is linearly interpolated).
+
+# Lattice constant a is adapted from [Berding] and [Ames]. Note that these Refs.
+# contain a typo (factor of ten).
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## HgMnTe (Hg_{1-x} Mn_x Te), mercury manganese telluride
+[HgMnTe]
+compound    = HgMnTe
+copy        = HgTe
+composition = 1 - x, x, 1
+Ec          = -303.0 + (4726. * 1339. / 1909.) * x
+Ev          = 0.0 + (4726. * -570. / 1909.) * x
+exch_yNalpha = 0.4e3 * x  # y * 400 meV; questionable value
+exch_yNbeta  = -0.6e3 * x  # y * -600 meV; questionable value
+exch_g      = 2
+exch_TK0    = 2.6
+a           = 0.6462 - 0.0114 * x  # [Furdyna]
+
+# The special parameter 'copy' copies all further material parameters (not
+# specified explicitly here) from HgTe.
+
+## Gap ansatz:
+## (1) linear approximation to matHgCdTe(2 * y); a rather crude approximation
+# Ec = -303.0 + 2678. * x
+# Ev = 0.0 - 1140. * x
+## (2) Cubic fit from data sheet and assumption of 'proportional scaling' of Ec
+## and Ev as function of the gap, namely:
+# Eg = -303 + 6229 * x - 34600 * x**2 + 122000 * x**3
+# Ec = -303 + (1339/1909) * (6229 * x - 34600 * x**2 + 122000 * x**3)
+# Ev =    0 -  (570/1909) * (6229 * x - 34600 * x**2 + 122000 * x**3)
+## (3) Linear fit from data sheet and assumption of 'proportional scaling' of Ec
+## and Ev as function of the gap, namely:
+# Eg = -303 + 4726 * x
+# Ec = -303 + (1339/1909) * 4726 * x
+# Ev =    0 -  (570/1909) * 4726 * x
+#
+# These values for HgMnTe are accurate in the low-doping regime only
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## CdZnTe (Cd_{1-x} Zn_x Te), cadmium zinc telluride (substrate)
+[CdZnTe]
+compound    = CdZnTe
+copy        = CdTe
+composition = 1 - x, x, 1
+a           = 0.6482 - 0.0378 * x  # similar to [Berding]
+# very crudely approximated
+# only the lattice constant is reliable
+# band parameters are taken from CdTe, but there is no justification
+
+# - # - # - # - # - # - # - # - # - # - # - # - # - # - # - # - #
+## Va, vacuum (use default parameters)
+## Large k = 0 band edges for valence and conduction bands
+## The (bulk) Hamiltonian is diagonal, with dispersions
+## E(k=0) +/- (hbar k)^2/2m  [set by F = 0 and gamma1 = 1]
+[Va]
+compound    = Va
+Ev          = -1e6
+Ec          = 1e6
+
+
+# References:
+# [Fortran] A. Pfeuffer-Jeschke, E. G. Novik et al., Fortran program "bsqw"
+# [Adachi]  S. Adachi, "Properties of Semiconductor Alloys: Group-IV, III-V and II-VI Semiconductors" (book).
+# [HgCdTe1] Laurenti et al., J. Appl. Phys. 67, 6454 (1990)
+# [HgCdTe2] Becker, Latussek, Pfeuffer-Jeschke, Landwehr, and Molenkamp, Phys. Rev. B 62, 10353 (2000)
+# [PJ]      A. Pfeuffer-Jeschke, PhD thesis, University of Würzburg (1999)
+# [Berding] Berding et al., J. El. Mater. 29, 676 (2000)
+# [Ames]    C. Ames, PhD thesis, University of Würzburg (2015)
+# [Furdyna] J. K. Furdyna, J. Appl. Phys. 64, R29 (1988)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/materials/initial.py b/kdotpy-v1.0.0/src/kdotpy/materials/initial.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a2210558dd8dd963e73a388ca538e2434c8fbd7
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/materials/initial.py
@@ -0,0 +1,90 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+import shutil
+from .materials import MaterialsList
+
# Per-user configuration directory (~/.kdotpy) and its 'materials' subdirectory,
# where user-editable materials files are looked up.
configpath = os.path.join(os.path.expanduser('~'), '.kdotpy')
materialspath = os.path.join(configpath, 'materials')
# File name of the default materials file (the built-in copy lives next to this module)
materialsfile = 'default'
# Directory containing this source file; used to locate the built-in 'default' file
scriptpath = os.path.dirname(os.path.realpath(__file__))
# Debug dump target, written to the current working directory in verbose mode
debug_fname = "kdotpy-materials.log"

# Module-level materials registry, populated by initialize_materials()
allMaterials = MaterialsList({})
+
def initialize_materials():
	"""Initialize materials file

	Read the 'default' materials file from the directory '~/.kdotpy/materials'.
	If it does not exist yet, copy the 'default' materials file there. Then read
	all other files in '~/.kdotpy/materials'.

	No return value; materials are accumulated in the module-level registry
	allMaterials.

	Raises:
	OSError   If the built-in default materials file is missing.
	"""
	# Default materials file
	source_mat_file = os.path.join(scriptpath, materialsfile)
	default_mat_file = os.path.join(materialspath, materialsfile)
	if not os.path.isfile(source_mat_file):
		raise OSError("Built-in default materials file does not exist")
	# makedirs also creates the parent '~/.kdotpy' if it does not exist yet;
	# os.mkdir would raise FileNotFoundError in that case. exist_ok avoids a
	# race if the directory is created concurrently.
	os.makedirs(materialspath, exist_ok=True)
	if not os.path.isfile(default_mat_file):
		shutil.copy(source_mat_file, default_mat_file)
		sys.stderr.write(f"Info (initialize_materials): New materials file '{materialsfile}' created in {materialspath}.\n")
	# Load the built-in file first, then the user's copy; redefinition warnings
	# are suppressed for the latter since it typically redefines the built-ins.
	allMaterials.load_from_file(source_mat_file)
	allMaterials.load_from_file(default_mat_file, redef_warning=False)

	# All other files in ~/.kdotpy/materials, loaded in alphabetical order
	all_mat_files = sorted(f.name for f in os.scandir(materialspath) if f.is_file())
	if 'verbose' in sys.argv:
		print("Material files:", ", ".join(['<built-in>', materialsfile] + [f for f in all_mat_files if f != materialsfile]))

	for filename in all_mat_files:
		if filename == materialsfile:  # skip default materials file
			continue

		filename_full = os.path.join(materialspath, filename)
		allMaterials.load_from_file(filename_full)

	if 'verbose' in sys.argv:
		print(f"Material parameters dumped to {debug_fname} for debugging")
		with open(debug_fname, 'w') as f:
			allMaterials.dump(stream=f)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/materials/materials.py b/kdotpy-v1.0.0/src/kdotpy/materials/materials.py
new file mode 100644
index 0000000000000000000000000000000000000000..03928c2d95e8eef33ecd0189bbf6c9a5818b07dc
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/materials/materials.py
@@ -0,0 +1,1086 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+import re
+import itertools
+import configparser
+import graphlib
+from .base import AstParameter, ast_linint, to_tuple, ast_linint_tuple
+from .base import is_valid_parameter
+
#### ELEMENTS AND COMPOUNDS ####
## Roman-numeral group labels, indexed by group number 1-18; index 0 is a placeholder.
el_group_ids = ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'VIII', 'VIII', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', '0']

## List of chemical elements. This is a part of the periodic table. The unstable
## elements (Tc and all elements beyond Bi) as well as the lanthanides have not
## been included.
el_groups = [
	[],  # placeholder for 0
	[ 'H', 'Li', 'Na',  'K', 'Rb', 'Cs'],  # 1 Alkali metals and hydrogen
	[      'Be', 'Mg', 'Ca', 'Sr', 'Ba'],  # 2 Alkaline earths
	[                  'Sc',  'Y'      ],  # 3 (Lanthanides excluded)
	[                  'Ti', 'Zr', 'Hf'],  # 4
	[                   'V', 'Nb', 'Ta'],  # 5
	[                  'Cr', 'Mo',  'W'],  # 6
	[                  'Mn',       'Re'],  # 7
	[                  'Fe', 'Ru', 'Os'],  # 8
	[                  'Co', 'Rh', 'Ir'],  # 9
	[                  'Ni', 'Pd', 'Pt'],  # 10
	[                  'Cu', 'Ag', 'Au'],  # 11
	[                  'Zn', 'Cd', 'Hg'],  # 12
	[       'B', 'Al', 'Ga', 'In', 'Tl'],  # 13
	[       'C', 'Si', 'Ge', 'Sn', 'Pb'],  # 14
	[       'N',  'P', 'As', 'Sb', 'Bi'],  # 15 Pnictogens
	[       'O',  'S', 'Se', 'Te'      ],  # 16 Chalcogens
	[       'F', 'Cl', 'Br',  'I'      ],  # 17 Halogens
	['He', 'Ar', 'Ne', 'Kr', 'Xe'      ]]  # 18 Noble gases

def el_group(el):
	"""Return the group (column in the periodic table) of a chemical element, or None if unknown"""
	# Index 0 is an empty placeholder list, so it can never match.
	return next((g for g, members in enumerate(el_groups) if el in members), None)

def el_group_id(el, invalid=None):
	"""Return the group id (roman numeral labelling the column in the periodic table) of a chemical element"""
	g = el_group(el)
	if g is None:
		return invalid
	return el_group_ids[g]
+
def split_compound(compound):
	"""Split the chemical formula of a compound into a list of element symbols"""
	return [m.group(0) for m in re.finditer("[A-Z][a-z]?", compound)]
+
def combine_compounds(elem1, elem2, linint_var=None):
	"""Combine two lists/dicts of elements to a new one, trying to preserve order

	For example, combining ['Hg', 'Cd', 'Te'] and ['Cd', 'Zn', 'Te'] yields
	['Hg', 'Cd', 'Zn', 'Te']. If both inputs are dict instances, where the
	values are the compositions, return a new dict with the composition values
	added together (or linearly interpolated; see linint_var).

	Arguments:
	elem1       List or dict. The list items or dict keys are strings
	            representing the chemical elements. The dict values are the
	            chemical composition values.
	elem2       List or dict, analogous to elem1.
	linint_var  String or None. If set, do a linear interpolation of the
	            chemical composition values with respect to this variable. This
	            only does something if elem1 and elem2 are dict instances. If
	            None, add the composition values.

	Returns:
	unique_elem  List or dict. A dict if both elem1 and elem2 are dict
	             instances, otherwise a list. See example above.
	"""
	# Interleave the two element sequences, then keep the last occurrence of
	# each element; this tends to preserve the chemical ordering of both inputs
	# (e.g. the anion stays last).
	combined_list = [y for x in itertools.zip_longest(elem1, elem2) for y in x if y is not None]
	unique_r = []
	for x in reversed(combined_list):
		if x not in unique_r:
			unique_r.append(x)
	unique_elem = list(reversed(unique_r))
	if isinstance(elem1, dict) and isinstance(elem2, dict):
		# A composition value None counts as 1; a missing element counts as 0.
		comp1 = [0 if el not in elem1 else 1 if elem1[el] is None else elem1[el] for el in unique_elem]
		comp2 = [0 if el not in elem2 else 1 if elem2[el] is None else elem2[el] for el in unique_elem]
		if linint_var is None:
			# Add the composition values elementwise. (Bug fix: previously this
			# concatenated the two lists, so that dict(zip(...)) silently
			# discarded the compositions coming from elem2.)
			comp = [c1 + c2 for c1, c2 in zip(comp1, comp2)]
		else:
			comp = ast_linint_tuple(tuple(comp1), tuple(comp2), linint_var)
			if isinstance(comp, AstParameter):
				comp = comp.expand()
		return dict(zip(unique_elem, comp))
	else:
		return unique_elem
+
def formula_to_compound(formula):
	"""Parse a chemical formula.
	For example, parse 'HgCd20%Te' to the lists elements = ['Hg', 'Cd', 'Te']
	and composition = [None, 0.2, None].

	Argument:
	formula   The string to be parsed

	Returns:
	elements     List of elements (strings)
	composition  List of compositions/concentrations (numbers)
	"""
	formula = formula.strip()
	# Validate: the whole string must be a sequence of element symbols, each
	# with an optional (possibly TeX-formatted) numerical subscript.
	if re.fullmatch(r"(([A-Z][a-z]?)(\$?_?\{?([.0-9]+)%?\}?\$?)?)*", formula) is None:
		return None, None
	elements = []
	composition = []
	for el, sub, number in re.findall(r"([A-Z][a-z]?)(\$?_?\{?([.0-9]+)%?\}?\$?)?", formula):
		elements.append(el)
		if number == '':
			composition.append(None)
		elif '%' in sub:
			composition.append(0.01 * float(number))  # percentage -> fraction
		else:
			composition.append(float(number))
	return elements, composition
+
+
#### MATERIAL CLASS ####

## Material parameters which are recognised and their default values
## (for units, see material_parameters_units below; energies are in meV).
## The extreme defaults for Ec and Ev match the band edges of the vacuum
## material 'Va' defined in the materials file.
material_parameters_default = {
	'Ec': 1.e6,
	'Ev': -1.e6,
	'P': 0,
	'F': 0.0,
	'gamma1': 1.0,
	'gamma2': 0.0,
	'gamma3': 0.0,
	'kappa': 0.0,
	'ge': 2.0,
	'q': 0.0,
	'a': 1.0,
	'strain_C1': 0.0,
	'strain_Dd': 0.0,
	'strain_Du': 0.0,
	'strain_Duprime': 0.0,
	'exch_yNalpha': 0.0,
	'exch_yNbeta': 0.0,
	'exch_g': 2.0,
	'exch_TK0': 1e-6,
	'diel_epsilon': 1.0,
	'delta_so': 0.0,
	'piezo_e14': 0.0,
	'bia_c': 0.0,
	'bia_b8p': 0.0,
	'bia_b8m': 0.0,
	'bia_b7': 0.0
}

## Aliases for material parameter, of the form {'oldname': 'newname', ...}
material_parameters_alias = {
	'epsilon_diel': 'diel_epsilon',
	'e14_piezo': 'piezo_e14',
	'yNalpha': 'exch_yNalpha',
	'yNbeta': 'exch_yNbeta',
	'as': 'strain_Dd',
	'cs': 'strain_C1'
}

## Alias for material parameter, with functional dependence.
## Form: {'oldname': ('newname', AstParameter("function"))
## Whenever 'oldname' is set as material parameter, add an additional parameter
## 'newname' with the AstParameter as value, if it is not already defined.
material_parameters_alias_ast = {
	'Ep': ('P', AstParameter("sqrt(Ep * hbarm0)")),
	'bs': ('strain_Du', AstParameter("-1.5 * bs")),
	'ds': ('strain_Duprime', AstParameter("-0.5 * sqrt(3) * ds"))
}

## Units of the quantities (also contains derived quantities)
## Dimensionless quantities can either be omitted, or have the value None
material_parameters_units = {
	'Ec': 'meV', 'Ev': 'meV', 'Ep': 'meV', 'P': 'meV nm',
	'as': 'meV', 'bs': 'meV', 'cs': 'meV', 'ds': 'meV',
	'strain_C1': 'meV', 'strain_Dd': 'meV', 'strain_Du': 'meV', 'strain_Duprime': 'meV',
	'Repsilon': 'meV', 'Sepsilon': 'meV', 'Tepsilon': 'meV', 'Uepsilon': 'meV', 'Vepsilon': 'meV',
	'exch_yNalpha': 'meV', 'exch_yNbeta': 'meV', 'exch_TK0': 'K',
	'delta_so': 'meV',
	'a': 'nm', 'aFree': 'nm',
	'piezo_e14': 'e/nm^2',
	'bia_b8p': 'meV nm^2', 'bia_b8m': 'meV nm^2', 'bia_b7': 'meV nm^2',
	'bia_c': 'meV nm'}

## TeX representations of the parameter names, e.g. for plot labels
material_parameters_tex = {
	'Ec': 'E_\\mathrm{c}', 'Ev': 'E_\\mathrm{v}', 'Ep': 'E_\\mathrm{p}', 'P': 'P',
	'F': 'F', 'gamma1': '\\gamma_1', 'gamma2': '\\gamma_2', 'gamma3': '\\gamma_3',
	'kappa': '\\kappa', 'ge': 'g_\\mathrm{e}', 'q': 'q',
	'as': 'a_{\\mathrm{dp}}', 'bs': 'b_{\\mathrm{dp}}', 'cs': 'C_{\\mathrm{dp}}', 'ds': 'd_{\\mathrm{dp}}',
	'strain_C1': 'C_1', 'strain_Dd': 'D_d', 'strain_Du': 'D_u', 'strain_Duprime': 'D\'_u',
	'Repsilon': 'R_\\epsilon', 'Sepsilon': 'S_\\epsilon', 'Tepsilon': 'T_\\epsilon', 'Uepsilon': 'U_\\epsilon', 'Vepsilon': 'V_\\epsilon',
	'exch_yNalpha': 'yN_0\\alpha', 'exch_yNbeta': 'yN_0\\beta', 'exch_TK0': 'T_{\\mathrm{K}0}', 'exch_g': 'g_\\mathrm{ex}',
	'delta_so': '\\Delta_\\mathrm{SO}',
	'a': 'a', 'aFree': 'a_\\mathrm{free}',
	'piezo_e14': '\\epsilon_{14}'}

# Regex for validation of material id
re_material_id = r"[A-Za-z][A-Za-z0-9_-]*?"
+
class Material:
	"""Container for material properties, such as band energies and Luttinger parameters.
	This class also supports materials with a free mixing parameter, such as
	Hg_{1-x}Cd_{x}Te.

	Attributes:
	name         String. Identifier of the material.
	variables    Dict. Cached substitution values, e.g. {'x': 0.7}.
	param        Dict. Material parameters; values are numbers, tuples, or
	             AstParameter instances (unevaluated expressions).
	compound     String or None. Chemical formula, e.g. 'HgCdTe'.
	elements     List of strings or None. The chemical elements.
	composition  Tuple or AstParameter. Composition value per element.
	"""
	def __init__(self, name, compound = None, elements = None, composition = None, param = {}, variables = None):
		## Default for argument param is not changed, hence safe
		self.name = name
		if isinstance(variables, dict):
			self.variables = variables
		elif variables is None:
			self.variables = {}
		else:
			raise TypeError("Argument variables must be a dict instance.")
		self.param = {}

		# Set compound, elements, composition. Note that the order is important.
		self.compound = None
		self.elements = None
		self.composition = None
		self.set_compound(compound)
		self.set_elements(elements)
		self.set_composition(composition)

		for p, value in param.items():
			if p in material_parameters_alias:
				p = material_parameters_alias[p]
			if p not in material_parameters_default and not is_valid_parameter(p):
				sys.stderr.write(f"ERROR (Material): {p} is not a valid material parameter. ({self.name})\n")
				continue
			if isinstance(value, (float, int, complex, tuple)):
				self.param[p] = value
			elif isinstance(value, str):
				astparam = AstParameter(value)
				self.param[p] = astparam.substitute(**self.param)
			elif isinstance(value, AstParameter):
				astparam = value
				self.param[p] = astparam.substitute(**self.param)
			else:
				raise TypeError(f"Invalid type for parameter value {p}")
			if p in material_parameters_alias_ast:
				p_new, value_new = material_parameters_alias_ast[p]
				if p_new not in self.param:
					self.param[p_new] = value_new
		# Fill in defaults for any parameter not set explicitly
		for p in material_parameters_default:
			if p not in self.param:
				self.param[p] = material_parameters_default[p]
		# TODO: Re-evaluation in evaluation order?

	def evaluate(self, name = None, composition = None, **variables):
		"""Return new material based on certain composition/concentration.

		Arguments:
		name         None or string. If set, name of the new Material. If None,
		             inherit the name from the present Material instance.
		composition  None or list of numbers. If None, use the composition
		             inherited from the present Material instance.
		**variables  Numerical values. Variables that are substituted, such as
		             'x' (a concentration) and 'T' (temperature). The values
		             are cached in the new Materials instance, so it is possible
		             to substitute variables subsequently.

		Returns:
		A new Material instance
		"""
		new_param = {}
		new_variables = self.variables.copy()
		# Only numerical variable values are cached; other types are ignored.
		for var, val in variables.items():
			if isinstance(val, (float, int)):
				new_variables[var] = val

		# Substitute parameters in dependency order, so that a parameter's
		# dependencies are already evaluated when it is substituted.
		eval_order = self.get_evaluation_order()
		for p in eval_order:
			if isinstance(self.param[p], (float, int, complex, tuple)):
				new_param[p] = self.param[p]
			elif isinstance(self.param[p], AstParameter):
				new_param[p] = self.param[p].substitute(**new_variables, **new_param)
			else:
				raise TypeError(f"Invalid type for parameter value {p}")

		compound = self.get_compound()
		elements = self.get_elements()
		# Bug fix: respect an explicitly given composition; previously the
		# argument was ignored and always overwritten by get_composition().
		if composition is None:
			composition = self.get_composition()
		return Material(
			self.name if name is None else name, compound=compound,
			elements=elements, composition=composition, param=new_param,
			variables=new_variables)

	def get_composition(self):
		"""Get composition as a tuple (substituting cached variables where possible)"""
		if isinstance(self.composition, AstParameter):
			co = self.composition.substitute(**self.variables)
			return (co,) if isinstance(co, (float, int)) else co
		elif isinstance(self.composition, tuple):
			return tuple(
				co.substitute(**self.variables) if isinstance(co, AstParameter) else co for co in self.composition
			)
		elif self.composition is None:
			return None
		else:
			raise TypeError(f"Invalid type {type(self.composition)} for {self.name}.composition")

	def set_composition(self, comp):
		"""Set composition from a tuple, string, or AstParameter.

		If comp is None, default to composition 1 for each element parsed from
		the material name.
		"""
		if comp is None:
			self.composition = tuple(1 for _ in split_compound(self.name))
			return
		if isinstance(comp, str):
			comp = AstParameter(comp)
		# Check if comp is a tuple or equivalent; if not to_tuple raises a TypeError
		as_tuple = to_tuple(comp)
		if self.elements is not None and len(self.elements) != len(as_tuple):
			raise ValueError("Length of composition must match the number of elements")
		self.composition = comp if isinstance(comp, AstParameter) else as_tuple

	def get_elements(self):
		"""Get a list of the chemical elements"""
		if self.elements is not None:
			return self.elements
		else:
			# Fall back to parsing the compound (or the material name)
			formula = self.compound if self.compound is not None else self.name
			el, co = formula_to_compound(formula)
			return el

	def set_elements(self, elements):
		"""Set elements from a list or a comma-separated string"""
		if elements is None:
			formula = self.compound if self.compound is not None else self.name
			el, co = formula_to_compound(formula)
			self.elements = el
		elif isinstance(elements, str):
			self.elements = [el.strip() for el in elements.split(',')]
		elif isinstance(elements, (list, tuple)):
			self.elements = list(elements)
		else:
			raise TypeError("Argument elements must be a list/tuple or string.")

	def get_compound(self):
		"""Get compound as a string"""
		if self.compound is not None:
			return self.compound
		el = self.get_elements()
		return None if el is None else "".join(el)

	def set_compound(self, compound):
		"""Set compound from a string; if None, derive it from elements or name"""
		if isinstance(compound, str):
			self.compound = compound
		elif compound is not None:
			raise TypeError("Argument compound must be a string or None.")
		elif self.elements is not None:
			self.compound = "".join(self.elements)
		else:
			el, co = formula_to_compound(self.name)
			self.compound = None if el is None else "".join(el)

	def get_groups(self):
		"""Get a list of group numbers corresponding to the chemical elements"""
		return [el_group(el) for el in self.get_elements()]

	def get_group_ids(self, invalid=None):
		"""Get a list of group labels corresponding to the chemical elements

		Argument:
		invalid   Substitute with this value if the group id cannot be
		          determined. Default is None.
		"""
		return [el_group_id(el, invalid=invalid) for el in self.get_elements()]

	def __getitem__(self, p):
		"""Get Material parameter"""
		if p not in self.param:
			raise KeyError(p)
		return self.param[p]

	def __setitem__(self, p, value):
		"""Set Material parameter; value may be a number, tuple, string, or AstParameter"""
		if p in material_parameters_alias:
			p = material_parameters_alias[p]
		if p not in material_parameters_default and not is_valid_parameter(p):
			sys.stderr.write(f"ERROR (Material): {p} is not a valid material parameter. ({self.name})\n")
			return
		if p in self.param:  # Redefining an existing parameter should put it at the end.
			del self.param[p]
		if isinstance(value, (float, int, complex, tuple)):
			self.param[p] = value
		elif isinstance(value, str):
			astparam = AstParameter(value)
			self.param[p] = astparam.substitute(**self.param)
		elif isinstance(value, AstParameter):
			astparam = value
			self.param[p] = astparam.substitute(**self.param)
		else:
			raise TypeError(f"Invalid type for parameter value {p}")
		if p in material_parameters_alias_ast:
			p_new, value_new = material_parameters_alias_ast[p]
			if p_new not in self.param:
				self.param[p_new] = value_new
		return

	def update(self, d, exclude=()):
		"""Update parameters from a dict d, skipping parameter names in exclude"""
		# Note: default for exclude changed from a mutable [] to an immutable ()
		if not isinstance(d, dict):
			raise TypeError("Argument must be a dict")
		for param, value in d.items():
			if param in material_parameters_alias:
				param = material_parameters_alias[param]
			if param not in exclude:
				self[param] = value

	def add_suffix(self, suffix=None):
		"""Add suffix to all parameter names, also those in AstParameter values"""
		if suffix is None:
			suffix = self.name
		subst = dict((p, f"{p}_{suffix}") for p in self.param)
		new_param = {}
		for p, v in self.param.items():
			new_p = subst[p]
			new_param[new_p] = v.substitute_variable_names(subst) if isinstance(v, AstParameter) else v
		self.param = new_param
		return self

	def check_complete(self, quiet=False):
		"""Check whether the Material has a complete set of parameters.
		Outputs a warning if there is a problem.

		Returns:
		True or False. Whether the Material has a complete set of parameters.
		"""
		missing = [p for p in material_parameters_default if p not in self.param]
		if quiet:
			pass
		elif len(missing) == 1:
			sys.stderr.write(f"Warning (Material.check_complete): Parameter {missing[0]} is undefined (Material {self.name}).\n")
		elif len(missing) > 1:
			param_str = ", ".join(missing)
			sys.stderr.write(f"Warning (Material.check_complete): Parameters {param_str} are undefined (Material {self.name}).\n")
		return len(missing) == 0

	def check_numeric(self, quiet=False):
		"""Check whether all Material parameters are numeric.
		Outputs a warning if there is a problem.

		Returns:
		True or False. Whether all Material parameters are numeric.
		"""
		nonnumeric = [p for p in material_parameters_default if p in self.param and not isinstance(self.param[p], (float, int))]
		comp = self.get_composition()
		if not isinstance(comp, tuple) or any(not isinstance(x, (float, int)) for x in comp):
			nonnumeric.append('composition')
		if quiet:
			pass
		elif len(nonnumeric) == 1:
			sys.stderr.write(f"Warning (Material.check_numeric): Parameter {nonnumeric[0]} does not have a numerical value (Material {self.name}).\n")
		elif len(nonnumeric) > 1:
			param_str = ", ".join(nonnumeric)
			sys.stderr.write(f"Warning (Material.check_numeric): Parameters {param_str} do not have numerical values (Material {self.name}).\n")
		return len(nonnumeric) == 0

	def _get_param_dependencies(self, param, _seen=None):
		"""Recursively collect param and all parameters it depends on (helper).

		The _seen set guards against unbounded recursion on (erroneous) cyclic
		dependencies; cycles are diagnosed in get_evaluation_order().
		"""
		if _seen is None:
			_seen = set()
		if param in _seen:
			return set()
		_seen.add(param)
		value = self.param.get(param)
		if isinstance(value, AstParameter):
			all_dep = {param}
			for p in value.get_dependencies():
				if p in self.param:
					all_dep |= self._get_param_dependencies(p, _seen)
			return all_dep
		else:
			return {param}

	def get_param_dependencies(self, param, order=None):
		"""Find all material parameters the given material parameter depends on"""
		dep = self._get_param_dependencies(param)
		if order is None:
			return [p for p in self.param if p in dep]
		elif isinstance(order, list):
			if set(order) != set(self.param.keys()):
				raise TypeError("Argument order must contain exactly all material parameters")
			return [p for p in order if p in dep]
		else:
			# Previously an invalid order silently returned None
			raise TypeError("Argument order must be a list or None")

	def get_evaluation_order(self):
		"""Determine evaluation order of parameters based on dependency.

		This function works by calling all parameters of AstParameter types and
		querying value.undefined_variables all variables that could not be
		substituted. These are put in a dependency graph that is sorted with
		graphlib.TopologicalSorter(). If there are cyclic dependences, this
		function will raise an exception, which we treat as a fatal error.

		Returns:
		eval_order  List, where the elements are the parameters. This list is
		            a concatenation of two parts. The first part consists of the
		            parameters in self.param with a numerical value. The second
		            part contains all parameters of AstParameter type, ordered
		            by dependency (i.e., if A depends on B, A comes after B in
		            this list). If none of the values of self.param are of type
		            AstParameter, simply return list(self.param.keys()).
		"""
		if not any(isinstance(value, AstParameter) for value in self.param.values()):
			return list(self.param.keys())
		dependency_graph = {}
		for param, value in self.param.items():
			if isinstance(value, AstParameter):
				dependency_graph[param] = set(value.get_dependencies())
		ts = graphlib.TopologicalSorter(dependency_graph)
		try:
			order = list(ts.static_order())
		except graphlib.CycleError as ex:
			cycle_str = " --> ". join(ex.args[1][::-1])
			sys.stderr.write(f"ERROR (Material.check_dependency): Cyclic dependence {cycle_str} for material parameters of {self.name}.\n")
			sys.exit(1)  # sys.exit instead of the site builtin exit()
		normal_param = [param for param, value in self.param.items() if not isinstance(value, AstParameter)]
		# The topological order also contains plain variables (e.g. 'x', 'T');
		# keep only actual parameters.
		ast_param = [param for param in order if param in self.param]
		return normal_param + ast_param
		# TODO: Using the dependency graph one could also check whether
		# self.param is already correctly ordered, and to sort self.param in
		# case it is not yet correctly ordered.

	def copy(self, name = None, **kwds):
		"""Create a copy. Assign new name if desired.

		Arguments:
		name     String or None. If not None, assign it as the name to the
		         target material.
		**kwds   Arguments compound, elements, and composition that will be
		         passed to Material.__init__().
		"""
		new_param = {}
		for p in self.param:
			new_param[p] = self.param[p]
		return Material(
			self.name if name is None else name, param=new_param,
			variables=self.variables, **kwds
		)

	def __repr__(self):
		"""Short string representation"""
		return "Material (%s): %s" % (self.name, str(self.param))

	def __eq__(self, other):
		"""Test equality of two Material instances

		If either comparand is incomplete or has a non-numerical value, return
		False.

		NOTE(review): raises TypeError for non-Material comparands instead of
		returning NotImplemented; kept as-is since callers may rely on it.
		"""
		if not isinstance(other, Material):
			raise TypeError("Equality can be tested only with another Material instance")
		if not self.check_complete(quiet=True) or not other.check_complete(quiet=True):
			return False
		if not self.check_numeric(quiet=True) or not other.check_numeric(quiet=True):
			return False
		return all([self.param[p] == other.param[p] for p in material_parameters_default])

	def dump(self, substitute=False, stream=sys.stdout):
		"""Print all material parameters (e.g., for debugging)

		Arguments:
		substitute  True or False. If True, apply substitute() to all material
		            parameter value of type AstParameter. If False, leave them
		            unevaluated.
		stream      Stream object which has .write() method. By default, this
		            is sys.stdout, but it may also be a file object.
		"""
		l = max(len(param) for param in self.param)
		s = self.get_compound()
		stream.write("{:{l}s} = {}\n".format('compound', s, l=l))
		elements = self.get_elements()
		s = ", ".join(elements)
		stream.write("{:{l}s} = {}\n".format('elements', s, l=l))
		composition = self.get_composition()
		if substitute and isinstance(composition, AstParameter):
			composition = composition.substitute(**self.variables, **self.param)
		if isinstance(composition, tuple):
			s = ", ".join(str(co) for co in composition)
		else:
			s = str(composition).lstrip('(').strip(',)')
		stream.write("{:{l}s} = {}\n".format('composition', s, l=l))
		for param, val in self.param.items():
			if substitute and isinstance(val, AstParameter):
				val = val.substitute(**self.variables, **self.param)
			s = str(val)
			if isinstance(val, tuple):
				s = s.lstrip('(').strip(',)')
			stream.write("{:{l}s} = {}\n".format(param, s, l=l))

	def format(self, fmt = None):
		"""Format into a 'pretty' string.

		Argument:
		fmt   One of None, 'full', 'sub', 'tex', 'tuple', 'plain'.

		Returns:
		For example (HgCdTe with x=0.7),
		fmt = None      'HgCdTe'
		fmt = 'full'    'Hg0.3Cd0.7Te'
		fmt = 'sub'     'Hg_{0.3}Cd_{0.7}Te'
		fmt = 'tex'     'Hg$_{0.3}$Cd$_{0.7}$Te'
		fmt = 'tuple'   'HgCdTe, 0.7'
		fmt = 'plain'   'HgCdTe 0.7'
		If there are any variables other than x, y, z, the 'tuple' and 'plain'
		formats will produce var=value pairs instead of simply the values.
		"""
		compound = self.get_compound()
		elements = self.get_elements()
		composition = self.get_composition()
		if compound is None or composition is None:
			sys.stderr.write("Warning (Materials.format): Cannot generate chemical formula, because either compound or compositional data is missing.\n")
			return "??"
		if isinstance(composition, AstParameter):
			composition = composition.expand()
		if len(elements) != len(composition):
			sys.stderr.write("Warning (Materials.format): Cannot generate chemical formula, because elements and composition data have different lengths.\n")
			return "??"

		s = ""

		if fmt in ["full", "sub", "tex"]:
			if fmt == "full":
				presub, postsub = "", ""
			elif fmt == "sub":
				presub, postsub = "_{", "}"
			elif fmt == "tex":
				presub, postsub = "$_{", "}$"

			for e, x in zip(elements, composition):
				if isinstance(x, AstParameter):
					x = x.substitute(**self.variables)
				if isinstance(x, AstParameter):
					if fmt == 'tex':
						x = x.tex()
					s += "%s%s %s %s" % (e, presub, x, postsub)
				elif x is None or isinstance(x, (float, int)) and abs(x - 1) < 1e-6:
					# Composition 1 (or unspecified): no subscript
					s += e
				elif x < 1e-6:
					# (Near-)zero composition: element is omitted entirely
					pass
				elif x < 1e-4:
					xs = "{:.10f}".format(x).rstrip('0')
					s += "%s%s%s%s" % (e, presub, xs, postsub)
				else:
					s += "%s%s%g%s" % (e, presub, x, postsub)
		elif fmt in ["tuple", "plain"]:
			s = self.name
			if all(var in ['x', 'y', 'z', 'T'] for var in self.variables):
				value_strs = [str(self.variables[var]) for var in ['x', 'y', 'z'] if var in self.variables]
			else:
				value_strs = [f"{key}={val}" for key, val in self.variables.items() if key != 'T']
			if len(value_strs) > 0:
				sep = ", " if fmt == 'tuple' else " "
				s += sep + sep.join(value_strs)
		else:
			s = compound
		return s
+
def linearmix(mat1, mat2, x, name = None, composition = None):
	"""Interpolate linearly between two materials: mat_new = x * mat1 + (1 - x) * mat2.

	Arguments:
	mat1         Materials instance. The first material.
	mat2         Materials instance. The second material.
	x            String. The variable label, e.g. 'x'. Note that this is the
	             label, not the value.
	name         None or string. If set, name of the new Material. If None, use
	             the chemical formula of the combined compound.
	composition  None or tuple. If set, composition of the new Material. If
	             None, use the composition inferred from mat1 and mat2.

	Returns:
	Materials instance with a free mixing parameter.
	"""
	if not isinstance(mat1, Material) or not isinstance(mat2, Material):
		raise TypeError("Arguments mat1 and mat2 must be Material instances")
	if not isinstance(x, str):
		raise TypeError("Argument x must be a string")

	# Evaluate both materials first and tag their parameters with a material
	# suffix, so that the parameters of mat1 and mat2 remain distinguishable.
	eval1 = mat1.evaluate().add_suffix()
	eval2 = mat2.evaluate().add_suffix()

	def collect_deps(evaluated, pname, target):
		# Copy all parameters that the (symbolic) parameter pname depends on
		# into target, skipping pname itself and already collected entries.
		for dep in evaluated.get_param_dependencies(pname):
			depval = evaluated.param.get(dep)
			if dep != pname and dep not in target and depval is not None:
				target[dep] = depval

	new_param = {}
	for p in material_parameters_default:
		key1 = f"{p}_{mat1.name}"
		key2 = f"{p}_{mat2.name}"
		val1 = eval1.param.get(key1)
		val2 = eval2.param.get(key2)
		if isinstance(val1, AstParameter):
			collect_deps(eval1, key1, new_param)
		if isinstance(val2, AstParameter):
			collect_deps(eval2, key2, new_param)
		interpolated = ast_linint(val1, val2, x)
		if interpolated is not None:
			new_param[p] = interpolated

	# Combine the elements of both materials to infer the chemical formula
	# (name) and the composition of the mixture.
	elem1 = mat1.get_elements()
	elem2 = mat2.get_elements()
	if mat1.composition is None or mat2.composition is None:
		new_elem = combine_compounds(elem1, elem2, x)
		new_composition = None
	else:
		combined = combine_compounds(
			dict(zip(elem1, mat1.get_composition())),
			dict(zip(elem2, mat2.get_composition())), x)
		new_elem = tuple(combined.keys())
		new_composition = tuple(combined.values())
	new_compound = "".join(new_elem)

	# Variables of mat2 take precedence over those of mat1 on conflict.
	new_variables = {**mat1.variables, **mat2.variables}

	return Material(
		new_compound if name is None else name,
		composition = new_composition if composition is None else composition,
		compound = new_compound, elements = new_elem,
		param = new_param, variables = new_variables)
+
class MaterialsList:
	"""Wrapper around a dict of Material instances, keyed by material id (name).

	The member function get_from_string() was formerly named str_to_material.

	Argument:
	arg   Material instance, or a list or a dict of Material instances. The
	      initial contents of the materials dict. For a dict, the keys are used
	      as material ids; otherwise Material.name is used.
	"""
	def __init__(self, arg):
		self.materials = {}
		if isinstance(arg, Material):
			if not re.match(re_material_id, arg.name):
				raise ValueError(f"Invalid material name/id {arg.name}")
			self.materials[arg.name] = arg
		elif isinstance(arg, list):
			for m in arg:
				if not isinstance(m, Material):
					raise TypeError("Input argument must be a list or dict of Material instances")
				if not re.match(re_material_id, m.name):
					raise ValueError(f"Invalid material name/id {m.name}")
				if m.name in self.materials:
					sys.stderr.write("Warning (MaterialsList): Material '%s' is redefined.\n" % m.name)
				self.materials[m.name] = m
		elif isinstance(arg, dict):
			# The dict key (not Material.name) is used as the material id.
			for m_id, mat in arg.items():
				if not isinstance(mat, Material):
					raise TypeError("Input argument must be a list or dict of Material instances")
				if not re.match(re_material_id, m_id):
					raise ValueError(f"Invalid material name/id {m_id}")
				if m_id in self.materials:
					sys.stderr.write("Warning (MaterialsList): Material '%s' is redefined.\n" % m_id)
				self.materials[m_id] = mat
		else:
			raise TypeError("Input argument must be a Material instance, or a list or a dict of Material instances")

	def __contains__(self, key):
		"""Check if material id is in the MaterialsList instance"""
		return key in self.materials

	def __getitem__(self, key):
		"""Get the Material with the given material id"""
		return self.materials[key]

	def __setitem__(self, key, value):
		"""Set new Material instance or replace existing one (with warning)"""
		if not isinstance(key, str):
			raise TypeError("MaterialsList key must be a str")
		if not re.match(re_material_id, key):
			raise ValueError(f"Invalid material name/id {key}")
		if not isinstance(value, Material):
			raise TypeError("MaterialsList value must be a Material instance")
		if key in self.materials:
			sys.stderr.write("Warning (MaterialsList): Material '%s' is redefined.\n" % key)
		self.materials[key] = value

	@staticmethod
	def _exception_message(ex):
		"""Format an exception message ending with a period.

		Unlike ex.args[0], str(ex) never raises for exceptions constructed
		without arguments and always yields a str.
		"""
		msg = str(ex)
		return msg if msg.endswith('.') else msg + '.'

	def get_from_string(self, mat_id, variables = None, verbose = False):
		"""Get material from string representing a compound

		Arguments:
		mat_id     String. Material id (name) or compound (chemical formula).
		variables  None, list, or dict. If a list, the numerical values for
		           variables x, y, z (more than 3 currently not supported). If a
		           dict, the keys and values are the variable names (strings)
		           and their values (numbers). These variables typically
		           parametrize an additional concentration, for example if the
		           command line argument is 'HgCdTe 70%'.
		verbose    True or False. Currently unused; kept for interface
		           compatibility.

		Returns:
		Material instance if the string is parsed correctly and refers to a material
		that is defined. Otherwise, return None.
		"""
		if not isinstance(mat_id, str):
			raise TypeError("Argument mat_id must be a string")
		if isinstance(variables, list):
			if len(variables) > 3:
				sys.stderr.write("Warning (MaterialsList.get): Only the first three material variables are considered.\n")
			vars_dict = dict(zip(['x', 'y', 'z'], variables))
		elif isinstance(variables, dict):
			vars_dict = variables
		elif variables is None:
			vars_dict = {}
		else:
			raise TypeError("Argument variables must be None, a list, or a dict.")

		# Find elemental composition and compositional ratio
		if mat_id in self.materials:
			mat = self.materials[mat_id]
			mat = mat.evaluate(**vars_dict)
			return mat
		else:
			el, co = formula_to_compound(mat_id)
			if el is None:
				sys.stderr.write(f"ERROR (MaterialsList.get): {mat_id} is neither a valid materials nor a valid chemical formula.\n")
				return None
			# Match the requested elements against the defined materials
			mat = None
			for m_id, m in self.materials.items():
				if el == m.get_elements():
					mat = m
					break
			if mat is None:
				formula = "".join(el)
				sys.stderr.write(f"ERROR (MaterialsList.get): {mat_id} did not match a valid material by identical chemical formula {formula}.\n")
				return None

			# Try to find compositional variables x, y, z in the composition of
			# the present material and try to match the compositional value
			mat_co = mat.get_composition()
			if isinstance(mat_co, AstParameter):
				mat_co = mat_co.expand()
			mat_co = [str(c) for c in mat_co]
			for v in ['x', 'y', 'z']:
				if v in mat_co:
					idx = mat_co.index(v)
					if co[idx] is not None:
						vars_dict[v] = co[idx]
				# Also recognize the complementary fraction '1 - v'
				if v not in vars_dict and f'1 - {v}' in mat_co:
					idx = mat_co.index(f'1 - {v}')
					if co[idx] is not None:
						vars_dict[v] = 1 - co[idx]

			# Substitute compositional values and check if the result yields the
			# same material as requested.
			mat = mat.evaluate(**vars_dict)
			mat_co = mat.get_composition()
			if isinstance(mat_co, AstParameter):
				mat_co = mat_co.expand()

			# Compare numeric entries only; non-numeric (symbolic) entries
			# cannot be checked and are treated as matching.
			equal = all(
				abs(c1 - c2) < 1e-6 for c1, c2 in zip(mat_co, co)
				if isinstance(c1, (float, int)) and isinstance(c2, (float, int)))
			if not equal:
				sys.stderr.write(f"ERROR (MaterialsList.get): The resulting material {mat.name} and the requested one have different compositions {tuple(mat_co)} vs {tuple(co)}.\n")
			return mat

	def get_unique_material_id(self, mat_id):
		"""Get a unique material id by appending hyphen and number"""
		if mat_id not in self.materials:
			return mat_id
		# Strip an existing numeric suffix ('-123') if present; require at
		# least one digit so that a bare trailing hyphen is not stripped.
		m = re.fullmatch(r"(" + re_material_id + r")-[0-9]+", mat_id)
		mat_name = mat_id if m is None else m.group(1)
		n = 1
		while f"{mat_name}-{n}" in self.materials:
			n += 1
		return f"{mat_name}-{n}"

	def copy(self, mat_source, mat_target, file_str="", redef_warning=True):
		"""Wrapper around Material.copy() that checks whether source and target are already defined"""
		if mat_source not in self.materials:
			sys.stderr.write(f"ERROR (MaterialsList.copy): Source material {mat_source} (for target {mat_target}) is not defined{file_str}.\n")
			return None
		if redef_warning and mat_target in self.materials:
			sys.stderr.write(f"Warning (MaterialsList.copy): Material {mat_target} overwritten by copy of material {mat_source}{file_str}.\n")
		self.materials[mat_target] = self.materials[mat_source].copy(mat_target)
		return self.materials[mat_target]

	def linearmix(self, mat1, mat2, var, mat_target, file_str="", redef_warning=True):
		"""Wrapper around linearmix() that checks whether source and target are already defined"""
		if isinstance(mat1, str):
			if mat1 not in self.materials:
				sys.stderr.write(f"ERROR (MaterialsList.linearmix): Material {mat1} for linearmix (target {mat_target}) is not defined{file_str}.\n")
				return None
			mat1 = self.materials[mat1]
		elif not isinstance(mat1, Material):
			raise TypeError("Argument mat1 must be a Material instance or str")
		if isinstance(mat2, str):
			if mat2 not in self.materials:
				sys.stderr.write(f"ERROR (MaterialsList.linearmix): Material {mat2} for linearmix (target {mat_target}) is not defined{file_str}.\n")
				return None
			mat2 = self.materials[mat2]
		elif not isinstance(mat2, Material):
			raise TypeError("Argument mat2 must be a Material instance or str")
		if redef_warning and mat_target in self.materials:
			sys.stderr.write(f"Warning (MaterialsList.linearmix): Material {mat_target} overwritten by linearmix of materials {mat1.name} and {mat2.name}{file_str}.\n")
		self.materials[mat_target] = linearmix(mat1, mat2, var, name=mat_target)
		return self.materials[mat_target]

	def parse_cmdarg(self, cmdarg):
		"""Parse a 'matparam' command-line argument.

		The argument is either a file name (delegated to load_from_file()) or a
		semicolon-separated list of 'key=value' pairs, where key may carry a
		material-id prefix, e.g. 'HgTe: Ev=0'. The material id stays active for
		subsequent pairs until another id is given.

		Returns:
		self
		"""
		if not isinstance(cmdarg, str):
			raise TypeError("Argument must be a string")
		if os.path.isfile(cmdarg):
			self.load_from_file(cmdarg)  # TODO: Handle exceptions
			return self
		# strip() removes whitespace on both sides; lstrip() was redundant
		args = [arg.strip() for arg in cmdarg.split(';')]
		active_mat_id = None
		re_param = r"[A-Za-z][A-Za-z0-9_]*"
		for arg in args:
			if len(arg) == 0:
				continue
			if '=' not in arg:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Material parameter argument '{arg}' is neither a valid file nor a valid key=value pair.\n")
				continue
			m = re.match(r"(?:(" + re_material_id + r")[:_.])?\s*(" + re_param + r")\s*=\s*(.*)", arg)
			if m is None:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Material parameter argument '{arg}' is not a valid key=value pair.\n")
				continue
			mat_id, param, value = m.groups()
			if mat_id is not None:
				if mat_id not in self.materials:
					sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Material id '{mat_id}' does not exist.\n")
					continue
				else:
					active_mat_id = mat_id
			if active_mat_id is None:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): No material id set, so '{arg}' is ignored.\n")
				continue
			if param in material_parameters_alias:
				param = material_parameters_alias[param]
			if param not in material_parameters_default and not is_valid_parameter(param):
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Invalid material parameter '{param}'.\n")
				continue
			if len(value) == 0:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): No value in argument '{arg}'.\n")
				continue
			if param in ['compound', 'elements', 'composition']:
				# These parameters have dedicated setters on Material
				mat = self.materials[active_mat_id]
				setter = getattr(mat, f'set_{param}')
				try:
					setter(value)
				except Exception as ex:
					ex_type = type(ex).__name__
					ex_msg = self._exception_message(ex)
					sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): {ex_type} in parameter '{param}': {ex_msg}\n")

			elif param in ['copy', 'linearmix']:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Parameter '{param}' cannot be used for command-line definitions.\n")
			elif param in ['name', 'param', 'variables']:
				sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): Parameter '{param}' is read-only.\n")
			else:
				mat = self.materials[active_mat_id]
				try:
					mat[param] = value
				except Exception as ex:
					ex_type = type(ex).__name__
					ex_msg = self._exception_message(ex)
					sys.stderr.write(f"ERROR (MaterialsList.parse_cmdarg): {ex_type} in parameter '{param}': {ex_msg}\n")

		return self

	def parse_dict(self, mat_id, mat_param, unique=False, from_file=None, redef_warning=True):
		"""Parse a dict loaded from a file and copy/mix/update/create a Material instance

		Arguments:
		mat_id     String. Material id, i.e., the dict key for the material
		mat_param  A dict instance. Contains the unparsed material parameters,
		           i.e., the values are expected to be strings.
		unique     True or False. If True, create a copy with a unique material
		           id if there is a material with the same id. If False, allow
		           existing materials to be updated.
		from_file  String or None. The file from which the dict is taken. This
		           is relevant only for warning and error messages.
		redef_warning  True or False. If True, show a warning when an existing
		               material is updated.

		Returns:
		mat_new    Material instance. This is the Material that has been created
		           or updated. Note that this instance has been added to the
		           MaterialsList already. None on failure.
		"""
		file_str = f" (from {from_file})" if from_file is not None else ""
		if unique and mat_id in self.materials:
			mat_id = self.get_unique_material_id(mat_id)
		# Pop structural parameters; they are applied after the Material exists
		composition = mat_param.pop('composition', None)
		compound = mat_param.pop('compound', None)
		elements = mat_param.pop('elements', None)
		if 'copy' in mat_param:
			mat_source = mat_param.pop('copy', "")
			mat_new = self.copy(mat_source, mat_id, file_str=file_str, redef_warning=redef_warning)
		elif 'linearmix' in mat_param:
			linearmix_args = mat_param.pop('linearmix')
			try:
				mat1_id, mat2_id, var = [s.strip() for s in linearmix_args.split(',')]
			except (AttributeError, ValueError):
				# AttributeError if the value is not a str; ValueError if it
				# does not split into exactly three fields.
				sys.stderr.write(f"ERROR (MaterialsList.parse_dict): Parameter linearmix must be of the form 'mat1, mat2, variable'{file_str}.\n")
				return None
			mat_new = self.linearmix(mat1_id, mat2_id, var, mat_id, file_str=file_str, redef_warning=redef_warning)
		elif mat_id in self.materials:
			if redef_warning:
				sys.stderr.write(f"Warning (MaterialsList.parse_dict): Update existing material {mat_id}{file_str}.\n")
			mat_new = self.materials[mat_id]
		else:
			self.materials[mat_id] = Material(mat_id, param = mat_param)
			mat_new = self.materials[mat_id]

		if mat_new is None:
			return None

		if compound is not None:
			mat_new.set_compound(compound)
		if elements is not None:
			mat_new.set_elements(elements)
		if composition is not None:
			mat_new.set_composition(composition)
		mat_new.update(mat_param)
		return mat_new

	def load_from_file(self, filename, verbose=False, redef_warning=True):
		"""Load material definitions from a configparser (.ini style) file.

		Arguments:
		filename       String. Path to the materials file.
		verbose        True or False. If True, dump all materials after loading.
		redef_warning  True or False. Passed to parse_dict(); if True, show a
		               warning when an existing material is updated.

		Returns:
		self
		"""
		if not os.path.isfile(filename):
			sys.stderr.write(f"ERROR (MaterialsList.load_from_file): File {filename} does not exist.\n")
			return self
		parser = configparser.ConfigParser()
		parser.optionxform = str  # Do not convert keys to lowercase
		try:
			parser.read(filename)
		except configparser.Error as e:
			exception_type = type(e).__name__
			exception_message = e.message.replace('\n', ' ')
			sys.stderr.write(f"ERROR (MaterialsList.load_from_file): Error parsing materials file {filename}: {exception_type}: {exception_message}\n")
			return self

		for mat_id in parser.sections():
			if not re.fullmatch(re_material_id, mat_id):
				sys.stderr.write(f"ERROR (MaterialsList.load_from_file): Invalid material id [{mat_id}] in file {filename}.\n")
				continue
			mat_param = dict(parser[mat_id])
			self.parse_dict(mat_id, mat_param, from_file=filename, redef_warning=redef_warning)

		if verbose:
			self.dump()
		return self

	def dump(self, substitute=False, stream=sys.stdout):
		"""Print all material parameters (for debugging)

		See Material.dump() for more information.
		"""
		for mat_id, mat in self.materials.items():
			stream.write(f"[{mat_id}]\n")
			mat.dump(substitute=substitute, stream=stream)
			stream.write("\n")
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/models.py b/kdotpy-v1.0.0/src/kdotpy/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bead36cf0b41adedc0dff384c86babf64316b5f
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/models.py
@@ -0,0 +1,708 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+
+from . import resumetools as resume
+from .cmdargs import resume_from
+from .config import get_config, get_config_num, get_config_bool
+from .momentum import Vector, VectorGrid, locations_index
+from .diagonalization import DiagDataPoint
+from .symbolic import SymbolicHamiltonian
+from .parallel import job_monitor_k_b
+from . import hamiltonian as hm
+from .diagonalization.diagsolver import CupyShiftInvEigshSolver, JaxEighSolver
+from .lltools import delta_n_ll, scaleup_eivec, scaleup_full_eivec
+from .transitions import get_transitions, get_transitions_full
+from .berry import berrycurv_k, chernnumber_ll, chernnumber_ll_full
+
+from .tasks import Task, TaskWrapper
+
+
class ModelBase:
    """Basic dummy class for calculation models. This contains all function definitions independent of the exact model (e.g. 1D dispersion, LL mode, etc).

    Developer note:
    This class works together with classes DiagData(Point) and tasks.py to
    enable flexible configuration of multiprocessing and multithreading in
    kdotpy. While a DiagDataPoint instance holds the parameters for the model
    that are unique to that point, the Model class defines which steps need
    to be calculated for all points (e.g. Hamiltonian construction,
    diagonalization, postprocessing). Tasks hold the information needed to
    dispatch all model steps for each DiagDataPoint to worker threads/processes.

    Since sending objects between processes in python involves (un-)pickling all
    the necessary information into a byte stream, some wrappers are needed to
    correctly execute the code in worker and main threads. While the actual
    calculation workload happens in the worker processes, the data set needs to
    be passed back to the main process via callbacks, where data is updated.
    Processes do not share RAM!

    Public functions of this class used for steps and callbacks must return a
    function handle, which is stored in the Task instance. We use private
    (starting with '_') functions with the same name for the actual
    implementations of the steps and callbacks. Note that steps are executed in
    the worker context, while callbacks are executed in the main process.

    See GitLab MR !4 or issues #24,#25 for more information.
    """

    def __init__(self, model_opts):
        """Initialization routine for all models. Step and Callback function
        lists should point to public functions of this class.

        Argument:
        model_opts   dict. Options shared by all DiagDataPoints. A 'solver'
                     entry configures threads/worker type for the solve step;
                     'obs' or 'dimful_obs' entries enable the post_solve step.
        """
        self.model_opts = model_opts
        # Parallel lists: steps[i] runs on a worker, callbacks[i] handles its
        # result in the main process; step_policy[i] chooses the worker type
        # and step_prio[i] the task priority.
        self.steps = [self.load_ddp, self.construct_ham, self.solve_ham, self.post_solve]
        self.callbacks = [self.resume_ddp, self.update_ham, self.update_ddp, self.update_ddp]
        self.step_policy = ['process', 'process', 'process', 'process']
        self.step_prio = [3, 2, 1, 0]
        self.threads = [1] * len(self.steps)
        self.gpu_workers = [0] * len(self.steps)
        if 'solver' in model_opts:
            solver = model_opts['solver']
            for i, step in enumerate(self.steps):
                # this is better than self.steps.index(), in case we'd ever have no or multiple solve steps.
                if step == self.solve_ham:
                    self.threads[i] = solver.num_threads
                    self.step_policy[i] = solver.worker_type
                    # NOTE(review): solve_step_idx is set only when a 'solver'
                    # is given; step grouping in enqueue_task() relies on it.
                    # Confirm group_steps is never used without a solver.
                    self.solve_step_idx = i
                    if isinstance(solver, CupyShiftInvEigshSolver) or isinstance(solver, JaxEighSolver):
                        self.gpu_workers[i] = 1
                if solver.num_processes == 1:
                    # Don't use parallel worker pools, if only one worker is
                    # used. This enables parallel construction of a single
                    # Hamiltonian, which starts its own process pool.
                    self.step_policy[i] = None
        self.resume_path, self.resume_step = resume_from()
        self.group_steps = get_config_bool('tasks_grouped')

        # Do not run post_solve step if no observables need to be calculated
        if "obs" in model_opts or "dimful_obs" in model_opts:
            self.run_post_solve = True
        else:
            self.run_post_solve = False

    def step_grouping_wrapper(self, ddp):
        """Public task generator.
        Packs function handle to model code implementation (private function
        with same name) and DiagDataPoint (parameters) into an object that can
        be pickled."""
        return TaskWrapper(self._step_grouping_wrapper, ddp).run

    def _step_grouping_wrapper(self, ddp):
        """This is just a wrapper method to call all single steps from one single
        Task and worker. It can be useful to get around issues with pickling of
        very large eivecs in some cases."""
        step = ddp.current_step - 1  # This needs to be reset to correct value after the enqueue_task() method.
        # Do all steps with callback except last pair (self.steps[step](ddp)())
        # will call the .run function of each step's TaskWrapper locally and
        # directly forward the result into the callback function.
        # The advantage here is, that the single TaskWrappers don't need to be
        # pickled, but only the TaskWrapper object of this grouping wrapper
        # method (which in most cases does not include (possibly very large)
        # eigenvectors).
        while step < len(self.steps) - 1:
            self.callbacks[step](ddp)(self.steps[step](ddp)())
            step += 1
            ddp.current_step = step  # this works, as the ddp is still in the same context as steps and callbacks
        # Do last step (callback must be handled externally in main thread):
        ddp.current_step += 1
        return self.steps[step](ddp)()

    def enqueue_task(self, ddp, queue, progress_monitor=None):
        """Generate and enqueue a task for the next step of a given study point.
        It is only necessary to call this once per DiagDataPoint, as the task
        callback is automatically extended to enqueue the next step.

        Arguments:
        ddp               DiagDataPoint instance; ddp.current_step selects the
                          next step (None is treated as 0).
        queue             Task queue; the Task constructor enqueues itself.
        progress_monitor  None or a monitor with .show() and .jobs_done; if
                          set, it is advanced when a point finishes early.
        """
        step = ddp.current_step if ddp.current_step is not None else 0
        if step >= len(self.steps):
            # All steps already done for this point; just advance the monitor.
            if progress_monitor is not None:
                progress_monitor.show(progress_monitor.jobs_done + 1)
            return
            # raise ValueError('No more steps left for %s' % study_point)
        if self.resume_path is None and self.steps[step] == self.load_ddp:
            step += 1  # skip load_ddp step silently if option is not requested
        ddp.current_step = step + 1
        f_step = self.steps[step]
        name = '%s: %s' % (f_step.__name__, job_monitor_k_b(ddp.k, ddp.paramval))

        def callback(*args, **kwds):
            """Use predefined callbacks, afterwards enqueue next Task"""
            # In grouped mode only the final step's callback (index -1) runs
            # here; intermediate callbacks ran inside the grouping wrapper.
            cb_func = self.callbacks[step if not self.group_steps else -1]
            if cb_func is not None:
                cb_func(ddp)(*args, **kwds)
            # Skip post_solve step if not requested
            if not self.run_post_solve:
                if self.steps[step+1] == self.post_solve:
                    if progress_monitor is not None:
                        progress_monitor.show(progress_monitor.jobs_done + 1)
                    return
            return self.enqueue_task(ddp, queue, progress_monitor)

        # Create the task. It is put into queue automatically
        if self.group_steps:
            Task(queue, name='Grouped tasks: %s' % job_monitor_k_b(ddp.k, ddp.paramval),
                 worker_func=self.step_grouping_wrapper(ddp), callback_func=callback,
                 worker_type=self.step_policy[self.solve_step_idx],
                 n_threads=self.threads[self.solve_step_idx],
                 gpu_workers=self.gpu_workers[self.solve_step_idx],
                 priority=(0, 0 if ddp.grid_index is None else ddp.grid_index))
        else:
            Task(queue, name=name, worker_func=f_step(ddp), callback_func=callback,
                 worker_type=self.step_policy[step], n_threads=self.threads[step],
                 gpu_workers=self.gpu_workers[step],
                 priority=(self.step_prio[step], 0 if ddp.grid_index is None else ddp.grid_index))
        return

    def load_ddp(self, ddp):
        """Public task generator.
        Packs function handle to model code implementation (private function
        with same name) and DiagDataPoint (parameters) into a object that can be
        pickled."""
        return TaskWrapper(self._load_ddp, ddp).run

    def _load_ddp(self, ddp):
        """Actual (private) step implementation for loading temporary saved
        DiagDataPoint data from file. Since ddp.current_step is also read from
        file, steps that have already passed for this DiagDataPoint instance
        will be skipped automatically. Runs on worker process/thread.

        Returns the loaded DiagDataPoint, or None if resuming is not requested."""
        if self.resume_path is not None:
            return resume.load_ddp_tempfile(ddp, self.resume_path)
        else:
            return None

    def resume_ddp(self, ddp):
        """Public function handle generator for a callback function.
        Callback after loading a DiagDataPoint instance"""
        def resume_callback(loaded_ddp):
            """Actual (private) callback implementation for updating a
            DiagDataPoint instance, if it has been loaded from file
            successfully. Runs in main thread."""
            if loaded_ddp is not None:
                if self.resume_step is not None:
                    # Manually overwrite step to resume from, if requested.
                    loaded_ddp.current_step = self.resume_step
                # Do the normal update procedure
                self.update_ddp(ddp)(loaded_ddp)

        return resume_callback

    def _construct_ham(self, ddp, **ignored_opts):
        """Actual implementation (private) of Hamiltonian construction.
        To be overwritten by child classes."""
        raise NotImplementedError("Class %s has no 'construct_ham' step." % self.__class__.__name__)

    def construct_ham(self, ddp):
        """Public task generator. Step: Hamiltonian construction."""
        return TaskWrapper(self._construct_ham, ddp, **self.model_opts, **ddp.opts).run

    def update_ham(self, ddp):
        """Public function handle generator for a callback function.
        Callback after construction of Hamiltonian."""
        def update(ham):
            """Actual implementation. Updates the DiagDataPoint instance in the main thread."""
            ddp.ham = ham

        return update

    def _solve_ham(self, ddp, solver = None, **ignored_opts):
        """Actual implementation (private) of diagonalization of Hamiltonian.
        Default minimal solve implementation. Solver argument is taken from
        modelopts dict.

        Returns a new DiagDataPoint carrying the eigenvalues and eigenvectors."""
        eival, eivec = solver.solve(ddp.ham)
        return DiagDataPoint(ddp.k, eival, eivec, paramval=ddp.paramval)

    def solve_ham(self, ddp):
        """Public task generator. Step: Diagonalization of Hamiltonian."""
        return TaskWrapper(self._solve_ham, ddp, **self.model_opts, **ddp.opts).run

    def _post_solve(self, ddp, **ignored_opts):
        """Actual implementation (internal) of DiagDataPoint post-solve processing.
        To be overwritten by child classes."""
        raise NotImplementedError("Class %s has no 'post_solve' step." % self.__class__.__name__)

    def post_solve(self, ddp):
        """Public task generator. Step: DiagDataPoint post-solve processing."""
        return TaskWrapper(self._post_solve, ddp, **self.model_opts, **ddp.opts).run

    def update_ddp(self, ddp):
        """Public function handle generator for a callback function.
        Callback after changes in the DiagDataPoint instance."""
        def update(new_ddp):
            """Actual implementation. Updates the DiagDataPoint instance in the main thread."""
            ddp.update(new_ddp)
            # Save a temporary file per point when 'tempout' was requested on
            # the command line, so a later run can resume from it.
            if 'tempout' in sys.argv:
                resume.save_ddp_tempfile(ddp)

        return update
+
+
class ModelLL(ModelBase):
    """Model for LL dispersion mode - Symbolic Hamiltonian version."""

    def _construct_ham(
            self, ddp, ll_max=None, h_sym=None, params=None, pot=None,
            ll_mode='full', split=0.0, lattice_reg=False, ignorestrain=False,
            axial=True, solver=None, h_sym_opts=None, **ignored_opts):
        """Actual (private) step implementation for Hamiltonian construction.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        ll_max         Integer. Maximum LL index.
        h_sym          SymbolicHamiltonian instance. The Hamiltonian.
        params         PhysParams instance.
        pot            Array. Potential V(z) in meV as function of position.
        ll_mode        LL calculation mode: legacy, sym or full

        Following arguments apply only to ll_mode 'legacy':
        split          Float. Amount of degeneracy lifting at zero magnetic
                       field.
        lattice_reg    True or False. Whether to apply lattice regularization
                       (x -> sin x). Default set to False to match symbolic
                       modes.
        ignorestrain   True or False. If True, do not include strain terms in
                       the Hamiltonian.
        axial          True or False. If True, apply axial approximation. If
                       False, include non-axial terms in the Hamiltonian.
        solver         DiagSolver instance. Set hllsplit magnitude from solver
                       precision.
        h_sym_opts     Modelopts dict for per-DDP construction of symbolic
                       Hamiltonian. Only required in ll_mode 'full' or 'sym' if
                       no constant h_sym can be given.

        Returns:
        Tuple with either
        - 'full' mode: Hamiltonian and zero-field split Hamiltonian
        - 'sym' or 'legacy' mode: List of single LL Hamiltonians and None (no
          splitting correction!)
        The last element of the tuple is the symbolic Hamiltonian for this DDP,
        if a new construction was necessary.
        """
        magn = ddp.paramval
        if ll_mode in ['full', 'sym'] and h_sym is None:
            # Calculate a symbolic Hamiltonian, if required, but not given. May
            # be the case if variable in-plane magnetic fields are present and
            # no single symbolic Hamiltonian can be defined.
            h_sym = SymbolicHamiltonian(
                hm.hz_sparse_split, (params,), h_sym_opts, hmagn = False, b0 = magn)
            h_sym_return = h_sym
        else:
            h_sym_return = None
        if ll_mode == 'full':
            ham = hm.hz_sparse_ll_full(h_sym, ll_max, magn, params.norbitals)
            hllsplit = None
            if abs(magn) < 1e-6:
                # At (nearly) zero field, add a small splitting term to lift
                # the LL degeneracy; its magnitude is chosen from the solver
                # precision (smaller for complex128 solvers).
                ll_split = 1e-8 if solver is None or solver.dtype == np.complex128 else 1e-3
                hllsplit = ll_split * hm.hsplit_ll_full(ll_max, nz=params.nz, norb=params.norbitals)
                ham += hllsplit
            if pot is not None:
                hpot = hm.hz_sparse_pot_ll_full(params, ll_max, pot, norb=params.norbitals)
                ham += hpot
            return ham, hllsplit, h_sym_return
        elif ll_mode == 'sym':
            magnz = magn.z() if isinstance(magn, Vector) else magn
            delta_n_vec = delta_n_ll(params.norbitals, magnz)
            ham_list = []
            # One Hamiltonian per LL index n; n runs from -2 to ll_max.
            for n in range(-2, ll_max + 1):
                ham = h_sym.ll_evaluate(n, magn, delta_n_vec)
                if pot is not None:
                    # Number of orbitals that have a valid LL index at this n.
                    nbands = np.count_nonzero(delta_n_vec + n >= 0)
                    hpot = hm.hz_sparse_pot(params, pot, norb=nbands)
                    ham += hpot
                ham_list.append(ham)
            return ham_list, None, h_sym_return
        elif ll_mode == 'legacy':
            ham_list = []
            for n in range(-2, ll_max + 1):
                ham = hm.hz_sparse_ll(
                    magn, n, params, lattice_reg = lattice_reg,
                    split = split if magn == 0 else 0, ignorestrain = ignorestrain,
                    axial = axial)
                if pot is not None:
                    # Hard-coded number of active orbitals per LL index for the
                    # 8-orbital and 6-orbital models, respectively.
                    if params.norbitals == 8:
                        nbands = 1 if n == -2 else 4 if n == -1 else 7 if n == 0 else 8
                    else:
                        nbands = 1 if n == -2 else 3 if n == -1 else 5 if n == 0 else 6
                    hpot = hm.hz_sparse_pot(params, pot, norb = nbands)
                    ham += hpot
                ham_list.append(ham)
            return ham_list, None, h_sym_return

    def update_ham(self, ddp):
        """Public function handle generator for a callback function.
        Callback after construction of Hamiltonian.
        Overwritten parent function, as this model has an additional Hamiltonian part."""
        def update(ham):
            """Actual implementation. Updates the DiagDataPoint instance in the main thread."""
            # _construct_ham returns a 3-tuple; unpack it onto the DDP.
            ddp.ham, ddp.hllsplit, ddp.h_sym = ham

        return update

    def _solve_ham(self, ddp, solver=None, params=None, ll_mode='full', ll_max=None, **ignored_opts):
        """Actual (private) step implementation for Hamiltonian diagonalization.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        solver         DiagSolver instance.
        params         PhysParams instance.
        ll_mode        LL calculation mode.
        ll_max         Maximum LL in ll_mode 'full'

        Returns:
        A DiagDataPoint instance (not connected to a DiagData instance).
        """
        magn = ddp.paramval
        magnz = magn.z() if isinstance(magn, Vector) else magn
        llindex = None
        if ll_mode == 'full':
            eival, eivec = solver.solve(ddp.ham)
            # Correct for degeneracy lifting
            if abs(magn) < 1e-6:
                print("Degeneracy between Landau levels was lifted at B = %s" % magn)
                # Subtract the expectation value of the splitting Hamiltonian
                # from each eigenvalue to undo the artificial lifting.
                delta_eival = np.real(
                    np.array([np.vdot(eivec[:, j], ddp.hllsplit.dot(eivec[:, j])) for j in range(0, len(eival))]))
                eival -= delta_eival
            ddp.hllsplit = None  # delete split hamiltonian
            eivec = scaleup_full_eivec(eivec, params, len(eival), ll_max, magnz).T
        elif ll_mode in ['sym', 'legacy']:
            # Diagonalize each per-LL Hamiltonian separately and concatenate.
            eival = []
            eivec = []
            ll_n = []
            for n, ham in enumerate(ddp.ham):  # n-2 is LL index
                eival1, eivec1 = solver.solve(ham)
                eival.extend(eival1)
                eivec.extend(scaleup_eivec(eivec1, params, len(eival1), n-2, magnz))
                ll_n.extend(np.full(len(eival1), n-2))
            eival = np.array(eival)
            eivec = np.array(eivec)
            llindex = np.array(ll_n)
        else:
            eival, eivec = None, None
        # ddp.ham will be cleared in new DiagDataPoint instance
        # ddp.current_step and ddp.grid_index will be kept via ddp.update(), if they are None.
        ddp = DiagDataPoint(0.0, eival, eivec, paramval=magn)
        ddp.llindex = llindex
        return ddp

    def _post_solve(
            self, ddp=None, ll_max=None, h_sym=None, params=None, obs=None,
            obs_prop=None, return_eivec=False, overlap_eivec=None, berry=False,
            transitions=False, transitions_range=None, wflocations=None,
            ll_mode='full', **ignored_opts):
        """Actual (private) step implementation for DiagDataPoint post solve processing.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        params         PhysParams instance.
        obs            List of strings or None. Observable ids of the
                       observables that will be calculated. If None or empty
                       list, do not do anything.
        obs_prop       ObservableList instance containing all observable
                       properties.
        return_eivec   True, False or None. If True, keep eigenvector data in
                       the return value (DiagDataPoint instance). If False,
                       discard them. If None, discard them only if observables
                       have been calculated.
        overlap_eivec  A dict, whose keys are the band labels (characters) and
                       values are the eigenvectors for which overlaps can be
                       calculated with the eigenvectors of this Hamiltonian.
        berry          2-tuple, True or False. If a 2-tuple of integers,
                       calculate Berry curvature for bands with indices in this
                       range. If True, calculate Berry curvature for all states.
                       If False, do not calculate Berry curvature.
        transitions    True or False, or float. If True or a float, calculate
                       optical transitions, where a float indicates the minimum
                       transition amplitude, below which the transitions are
                       discarded. If False, do not calculate transitions.
        transitions_range  2-tuple or None. If set, calculate optical
                           transitions only for states in that energy range. If
                           None, do not restrict to an energy range.
        wflocations    List, array, or VectorGrid instance. Contains the
                       magnetic field values where wave functions should be
                       saved (plot and table). None if no wave functions should
                       be saved.
        ll_mode        LL calculation mode.

        Returns:
        A DiagDataPoint instance (not connected to a DiagData instance).
        """
        magn = ddp.paramval
        magnz = magn.z() if isinstance(magn, Vector) else magn

        ddp.calculate_observables(params, obs, obs_prop=obs_prop, overlap_eivec=overlap_eivec, magn=magn)

        if ll_mode in ['full', 'sym'] and h_sym is None:
            # Fall back to the per-DDP symbolic Hamiltonian from _construct_ham.
            h_sym = ddp.h_sym

        if ll_mode != 'legacy':
            if berry:
                which = berry if isinstance(berry, tuple) else None
                if magn == 0.0:
                    # At zero field, store trivial values instead of computing.
                    ddp.set_observable_value('chern', None, 0.0)
                    ddp.set_observable_value('chernsim', None, 0.0)
                else:
                    func_handle = chernnumber_ll_full if ll_mode == 'full' else chernnumber_ll
                    bc_val, bc_ei, bc_ll = func_handle(ddp, magn, h_sym, ll_max, which=which, norb=params.norbitals)
                    ddp.set_observable_value('chern', bc_ei, np.asarray(bc_val))
                    ddp.set_observable_value('chernsim', None, 1.0)

            if transitions:
                # A float value for transitions doubles as the amplitude cutoff.
                ampmin = transitions if isinstance(transitions, (float, np.floating)) else None
                func_handle = get_transitions_full if ll_mode == 'full' else get_transitions
                td = func_handle(ddp, magn, h_sym, which=transitions_range, ampmin=ampmin, norb=params.norbitals, nll=ll_max + 3)
                td.sort(in_place=True, llsort=(ll_mode != 'full'))
                ddp.transitions = td

        if isinstance(wflocations, (list, np.ndarray, VectorGrid)):
            # Keep eigenvectors if wave functions are requested at this field.
            wfmagn = magn if isinstance(magn, Vector) else Vector(magn, astype='z')
            if locations_index(wflocations, wfmagn, vec_numeric = magnz) is not None:
                return_eivec = True

        if ddp.h_sym is not None:
            # Clean-up temporary symbolic hamiltonian per DDP, as it is not required any more.
            del ddp.h_sym
        save_ddp = get_config('diag_save_binary_ddp')
        if save_ddp in ['numpy', 'npz']:
            npz_filename = "ddp_%s_%s.npz" % (ddp.file_id(), ddp.hash_id())
            ddp.to_binary_file(npz_filename)
        elif save_ddp in ['hdf5', 'h5']:
            h5_filename = "ddps.h5"
            ddp.to_binary_file(h5_filename)
        if not return_eivec:
            ddp.delete_eivec()
        return ddp
+
class ModelMomentum1D(ModelBase):
    """Model for k 1D (dispersion) mode."""

    def _construct_ham(
            self, ddp, params = None, periodicy = False, lattice_reg = False,
            split = 0.0, splittype = 'auto', ignorestrain = False, gauge_zero = 0.0,
            solver = None, axial = True, pot = None, poty = None, bia = False,
            ignore_magnxy = False, **ignored_opts):
        """Actual (private) step implementation for Hamiltonian construction.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        params         PhysParams instance.
        periodicy      True or False. Whether the geometry in the transversal
                       (y) direction is periodic/cylindrical (True) or finite
                       (False).
        lattice_reg    True or False. Whether to apply lattice regularization
                       (x -> sin x).
        split          Float. Amount of degeneracy lifting.
        splittype      String. Type of degeneracy lifting.
        ignorestrain   True or False. If True, do not include strain terms in
                       the Hamiltonian.
        gauge_zero     Float. Shifts the gauge field by this amount. See
                       hamiltonian/full.py.
        solver         DiagSolver instance
        axial          True or False. If True, apply axial approximation. If
                       False, include non-axial terms in the Hamiltonian.
        pot            Array, 1- or 2-dimensional. Potential V(z) (1-dim) or
                       V(z, y) (2-dim) in meV as function of position.
        poty           Array, 1-dimensional. Only if pot is also 1-dimensional
                       or None. Potential V(y) as function of position.
        bia            True or False. If True, include BIA terms in the
                       Hamiltonian.
        ignore_magnxy  True or False. If True, neglect the in-plane components
                       of the orbital part of the magnetic field. Only for
                       legacy reasons, e.g., comparing with results that were
                       calculated when these terms were not yet implemented.

        Returns:
        The constructed Hamiltonian (sparse matrix), including splitting and
        potential terms where applicable.
        """
        b = ddp.paramval
        kx, ky = ddp.k.xy()
        # Only kx is meaningful in the 1D geometry; a nonzero ky is reported
        # but the calculation proceeds with ky = 0.
        if abs(ky) > 1e-6:
            sys.stderr.write("ERROR (ModelMomentum1D._construct_ham): y component of the momentum must be zero\n")
        kterms = hm.h_kterms(params, axial=axial) if params.lattice_transformed() else None
        if b == 0.0:
            ham = hm.hzy_sparse(
                kx, 0.0, params, periodicy=periodicy, solver=solver,
                lattice_reg=lattice_reg, ignorestrain=ignorestrain, axial=axial,
                bia=bia, kterms=kterms)
        else:
            ham = hm.hzy_sparse_magn(
                kx, b, params, periodicy=periodicy, solver=solver,
                lattice_reg=lattice_reg, ignorestrain=ignorestrain, axial=axial,
                bia=bia, kterms=kterms, gauge_zero=gauge_zero,
                ignore_magnxy=ignore_magnxy)
        if split != 0.0:
            hamsplit = split * hm.hsplit_full(params, splittype, k=[kx], bia=bia, lattice_reg=lattice_reg)
            ham += hamsplit

        if pot is not None:
            ham += hm.h_pot_1d(pot, params)
        if poty is not None:
            ham += hm.h_pot_1d(poty, params, axis='y')
        return ham

    def _post_solve(
            self, ddp, params = None, obs = None, obs_prop = None,
            overlap_eivec = None, ignorestrain = False, axial = True,
            split = 0.0, lattice_reg = False, berry = False, return_eivec = None,
            wflocations = None, **ignored_opts):
        """Actual (private) step implementation for DiagDataPoint post solve processing.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        params         PhysParams instance.
        obs            List of strings or None. Observable ids of the
                       observables that will be calculated. If None or empty
                       list, do not do anything.
        obs_prop       ObservableList instance containing all observable
                       properties.
        overlap_eivec  A dict, whose keys are the band labels (characters) and
                       values are the eigenvectors for which overlaps can be
                       calculated with the eigenvectors of this Hamiltonian.
        ignorestrain   True or False. If True, do not include strain terms in
                       the Hamiltonian.
        axial          True or False. If True, apply axial approximation. If
                       False, include non-axial terms in the Hamiltonian.
        split          Float. Amount of degeneracy lifting.
        lattice_reg    True or False. Whether to apply lattice regularization
                       (x -> sin x).
        berry          2-tuple, True or False. If a 2-tuple of integers,
                       calculate Berry curvature for bands with indices in this
                       range. If True, calculate Berry curvature for all states.
                       If False, do not calculate Berry curvature.
        return_eivec   True, False or None. If True, keep eigenvector data in
                       the return value (DiagDataPoint instance). If False,
                       discard them. If None, discard them only if observables
                       have been calculated.
        wflocations    List, array, or VectorGrid instance. Contains the
                       magnetic field values where wave functions should be
                       saved (plot and table). None if no wave functions should
                       be saved.

        Returns:
        A DiagDataPoint instance.
        """
        ddp.calculate_observables(
            params, obs, obs_prop = obs_prop, overlap_eivec = overlap_eivec,
            magn = ddp.paramval)

        if berry:
            berry_dk = get_config_num('berry_dk', minval=0)
            if berry_dk == 0:
                sys.stderr.write(
                    "ERROR (diagonalization.hz): Berry curvature momentum step must be a positive number.\n")
                raise ValueError
            which = berry if isinstance(berry, tuple) else None
            bc_val, bc_ei, _ = berrycurv_k(
                ddp, hm.hz_sparse_split, params, dk=berry_dk, which=which,
                lattice_reg=lattice_reg, split=split, ignorestrain=ignorestrain,
                axial=axial)
            ddp.set_observable_value('berry', bc_ei, np.asarray(bc_val))
            # 'berryiso' is the Berry curvature weighted by isoparity <P_z>.
            ibc_val = ddp.get_observable('berry') * ddp.get_observable('isopz')
            ddp.set_observable_value('berryiso', np.arange(0, ddp.neig), ibc_val)

        # Wave functions
        if isinstance(wflocations, (list, np.ndarray, VectorGrid)):
            # Keep eigenvectors if wave functions are requested at this k.
            if locations_index(wflocations, ddp.k) is not None:
                return_eivec = True

        save_ddp = get_config('diag_save_binary_ddp')
        if save_ddp in ['numpy', 'npz']:
            npz_filename = "ddp_%s_%s.npz" % (ddp.file_id(), ddp.hash_id())
            ddp.to_binary_file(npz_filename)
        elif save_ddp in ['hdf5', 'h5']:
            h5_filename = "ddps.h5"
            ddp.to_binary_file(h5_filename)
        if return_eivec is None:
            # Default: keep eigenvectors only if no observables were requested.
            return_eivec = (obs is None or obs == [])
        if not return_eivec:
            ddp.delete_eivec()
        return ddp
+
class ModelMomentum2D(ModelMomentum1D):
    """Model for k 2D (dispersion) mode.
    Only differs from 1D model in Hamiltonian construction step.
    """

    def _construct_ham(
            self, ddp, params = None, lattice_reg = False, split = 0.0,
            splittype = 'auto', ignorestrain = False, solver = None, axial = True,
            pot = None, bia = False, ignore_magnxy = False, **ignored_opts):
        """Actual (private) step implementation for Hamiltonian construction.
        Runs on worker process/thread.

        Arguments:
        ddp            DiagDataPoint instance with specific parameters.
        params         PhysParams instance.
        lattice_reg    True or False. Whether to apply lattice regularization
                       (x -> sin x).
        split          Float. Amount of degeneracy lifting.
        splittype      String. Type of degeneracy lifting.
        ignorestrain   True or False. If True, do not include strain terms in the
                       Hamiltonian.
        solver         DiagSolver instance
        axial          True or False. If True, apply axial approximation. If False,
                       include non-axial terms in the Hamiltonian.
        pot            Array, 1- or 2-dimensional. Potential V(z) (1-dim) or V(z, y)
                       (2-dim) in meV as function of position.
        bia            True or False. If True, include BIA terms in the Hamiltonian.
        ignore_magnxy  True or False. If True, neglect the in-plane components of
                       the orbital part of the magnetic field. Only for legacy
                       reasons, e.g., comparing with results that were calculated
                       when these terms were not yet implemented.

        Returns:
        The constructed Hamiltonian (sparse matrix), including splitting and
        potential terms where applicable.
        """
        b = ddp.paramval
        kx, ky = ddp.k.xy()
        # Extra k-dependent terms are only needed on transformed lattices.
        kterms = hm.h_kterms(params, axial=axial) if params.lattice_transformed() else None
        ham = hm.hz_sparse(
            [kx, ky], b, params, solver=solver, lattice_reg=lattice_reg,
            ignorestrain=ignorestrain, axial=axial, bia=bia, kterms=kterms,
            ignore_magnxy=ignore_magnxy)
        if split != 0.0:
            hamsplit = split * hm.hsplit_full(params, splittype, k=[kx, ky], bia=bia, lattice_reg=lattice_reg)
            ham += hamsplit
        if pot is not None:
            hpot = hm.hz_sparse_pot(params, pot)
            ham += hpot
        return ham
diff --git a/kdotpy-v1.0.0/src/kdotpy/momentum.py b/kdotpy-v1.0.0/src/kdotpy/momentum.py
new file mode 100644
index 0000000000000000000000000000000000000000..707c632858f9734cbf0d749ae40b6faf22acf255
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/momentum.py
@@ -0,0 +1,3148 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from math import sin, cos, sqrt, acos, pi
+import numpy as np
+import sys
+import itertools
+from .config import get_config_bool
+
## Parsing momentum values
# Error-message templates used when a momentum argument cannot be parsed.
MomentumFormatError = "Momentum must be in format: k; (kx, ky); (k, phi, \"kphi\"); or (k, phi, \"deg\")"
Momentum3DFormatError = "Momentum must be in format: k; (kx, ky); (k, phi, \"kphi\"); (k, phi, \"deg\"); or (kx, ky, kz)."

# Presumably: whether angular inputs default to degrees — confirm against usage.
degrees_by_default = False
+
def isrealnum(x):
	"""Return True if x is a real number (Python or NumPy int/float)."""
	real_types = (float, np.floating, int, np.integer)
	return isinstance(x, real_types)
+
def to_polar(x, y, deg = False):
	"""Convert cartesian coordinates (x, y) to polar coordinates (r, phi).

	Arguments:
	x, y   Float. Cartesian coordinates.
	deg    True or False. If True, return the angle in degrees, else radians.

	Returns:
	r, phi   Magnitude and angle.
	"""
	z = x + 1.j * y
	return np.abs(z), np.angle(z, deg)
+
def degstr(x):
	"""Format a value in degrees; NaN yields the string ' nan deg'."""
	if np.isnan(x):
		return " nan deg"
	return "%4g" % x
+
def diff_mod(x, y, m):
	"""Distance between x and y on a circle of circumference m.

	Both inputs are reduced modulo m; the smaller of the two arc lengths
	between them is returned. Works element-wise on arrays.
	"""
	dist = np.abs(np.mod(x, m) - np.mod(y, m))
	return np.minimum(dist, m - dist)
+
+def to_spherical(x, y, z, deg = False):
+	"""Get spherical coordinates (r, theta, phi) from cartesian coordinates (x, y, z)"""
+	rxy2 = x**2 + y**2 + z**2
+	if rxy2 == 0.0:
+		theta = 0.0 if z >= 0.0 else 180. if deg else pi
+		return abs(z), theta, 0.0
+	r = np.sqrt(x**2 + y**2 + z**2)
+	if deg:
+		theta = 90. if z == 0.0 else acos(z / r) * 180. / pi
+	else:
+		theta = pi / 2 if z == 0.0 else acos(z / r)
+	phi = np.angle(x + 1.j * y, deg)
+	return r, theta, phi
+
def polar_fold(r, phi, deg = False, fold = True):
	"""Fold polar coordinates.
	Folding means that a polar coordinate will be brought into a canonical form
	where the angle lies between -90 and +90 degrees (-pi/2 and pi/2), possibly
	with a negative radius. The identity (x, y) = (r cos phi, r sin phi) is
	preserved.

	Arguments:
	r, phi   Float. Radial and angular coordinates.
	deg      True or False. Degrees or radians as angular units, respectively.
	fold     True, False, or None. If True, fold. If False, return non-folded
	         angular coordinate phi between -180 and 180 degrees (-pi and pi).
	         If None, return the input as is.

	Returns:
	r, phi   New set of polar coordinates.
	"""
	if fold is None:
		pass  # Keep (r, phi) exactly as given.
	elif deg:
		if fold and r < 0.0:
			# Absorb a negative radius into the angle: (r, phi) -> (-r, phi + 180).
			r = -r
			phi = (phi + 180.) % 360.
		else:
			# Normalize the angle to the interval [-180, 180).
			phi = (phi + 180.) % 360. - 180.
		if fold and phi > 90.:
			# Fold into (-90, 90] by flipping the sign of the radius.
			r = -r
			phi -= 180.
		elif fold and phi <= -90.:
			r = -r
			phi += 180.
	else:
		# Same procedure as above, in radians instead of degrees.
		if fold and r < 0.0:
			r = -r
			phi = (phi + pi) % (2 * pi)
		else:
			phi = (phi + pi) % (2 * pi) - pi
		if fold and phi > 0.5 * pi:
			r = -r
			phi -= pi
		elif fold and phi <= -0.5 * pi:
			r = -r
			phi += pi
	return r, phi
+
def spherical_fold(r, theta, phi, deg = False, fold = True):
	"""Fold spherical coordinates.
	Folding means that a spherical coordinate will be brought into a canonical
	form where the angle phi lies between -90 and +90 degrees (-pi/2 and pi/2).
	The radius may be negative and the angle theta may be reflected (theta to
	180 degrees minus theta). The identity
	  (x, y, z) = (r sin theta cos phi, r sin theta sin phi, r cos theta)
	is preserved.

	Arguments:
	r, theta, phi   Float. Spherical coordinates.
	deg             True or False. Degrees or radians as angular units,
	                respectively.
	fold            True, False, or None. If True, fold. If False, return
	                non-folded angular coordinate phi between -180 and 180
	                degrees (-pi and pi). If None, return the input as is.

	Returns:
	r, theta, phi   New set of spherical coordinates.

	Raises:
	ValueError if theta lies outside [0, 180] degrees ([0, pi] radians).
	"""
	if fold is None:
		pass
	elif deg:
		if theta < 0.0 or theta > 180.:
			raise ValueError("Invalid value for theta")
		if fold and r < 0.0:
			# Absorb a negative radius by mirroring both angles.
			r = -r
			phi = (phi + 180.) % 360.
			theta = 180. - theta
		else:
			# Normalize phi to [-180, 180).
			phi = (phi + 180.) % 360. - 180.
		if fold and theta == 90.:
			# On the equator, reduce to the two-dimensional (polar) fold.
			r, phi = polar_fold(r, phi, deg, fold)
		elif fold and theta > 90.:
			# Reflect into the upper hemisphere (theta < 90).
			r = -r
			phi = phi % 360. - 180.
			theta = 180. - theta
	else:
		if theta < 0.0 or theta > pi:
			raise ValueError("Invalid value for theta")
		if fold and r < 0.0:
			r = -r
			phi = (phi + pi) % (2 * pi)
			theta = pi - theta
		else:
			phi = (phi + pi) % (2 * pi) - pi
		# Bug fix: the equator lies at theta == pi / 2, not theta == pi. The
		# previous comparisons against pi made the radian branch skip the
		# theta fold entirely (theta > pi is excluded by the check above),
		# inconsistent with the degree branch, which compares against 90.
		if fold and theta == 0.5 * pi:
			r, phi = polar_fold(r, phi, deg, fold)
		elif fold and theta > 0.5 * pi:
			r = -r
			phi = phi % (2 * pi) - pi
			theta = pi - theta
	return r, theta, phi
+
def add_var_prefix(var, prefix):
	"""Combine a variable prefix with a component name.

	Returns 'r' if both prefix and var are empty, the bare prefix if var is
	'r', and the concatenation prefix + var otherwise.
	"""
	if len(prefix) == 0 and len(var) == 0:
		return 'r'
	if var == 'r':
		return prefix
	return prefix + var
+
+# TODO: Check whether reflection functions are still necessary
def no_reflect_array(arr):
	"""Identity transformation: return arr plus a trivial new-to-old index map."""
	mapping = np.arange(len(arr), dtype = int)
	return arr, mapping
+
def reflect_array(arr, offset = 0.0):
	"""Extend an array by its reflection x -> offset - x.

	Arguments:
	arr      Numpy array of floats.
	offset   Float. The reflection maps each value x to offset - x.

	Returns:
	newval   Numpy array. Sorted union of arr and its mirror image, with
	         near-duplicates (within 1e-9) removed.
	mapping  Numpy array of int. mapping[i] is the index into arr of the
	         original value that newval[i] derives from.
	"""
	mirrored = offset - arr
	newval = np.sort(np.concatenate((arr, mirrored)))
	keep = np.concatenate(([True], np.diff(newval) > 1e-9))
	newval = newval[keep]
	# Distance of each new value to each original value; the direct distance
	# gets a slight 'bonus' (1e-9) over the mirrored one to break ties.
	direct = np.abs(newval[:, np.newaxis] - arr[np.newaxis, :]) - 1e-9
	mirror = np.abs(newval[:, np.newaxis] + arr[np.newaxis, :] - offset)
	mapping = np.argmin(np.minimum(direct, mirror), axis = 1)
	return newval, mapping
+
def reflect_angular_array(arr, axis = None, deg = True):
	"""Extend an angular array by reflections about the given axes.

	Arguments:
	arr    Numpy array of angles.
	axis   String or None. Contains 'x' and/or 'y', the reflection axes.
	       None is treated as 'xy'.
	deg    True or False. Degrees or radians as angular units.

	Returns:
	newval   Numpy array. Sorted, deduplicated reflected angles restricted to
	         [-180, 180] degrees ([-pi, pi]).
	mapping  Numpy array of int. mapping[i] is the index into arr of the
	         original value that newval[i] derives from.
	"""
	if axis is None:
		axis = 'xy'
	phimax = 180.0 if deg else np.pi
	# Candidate images of arr under the reflections; which ones apply
	# depends on the requested axes.
	allvalues = (arr, -arr, phimax - arr, -phimax + arr)
	if 'x' in axis and 'y' in axis:
		which = np.array([0, 1, 2, 3])
	elif 'x' in axis:
		which = np.array([0, 2])
	elif 'y' in axis:
		which = np.array([0, 1])
	else:
		which = np.array([0])
	newval = np.sort(np.concatenate(np.array(allvalues)[which]))
	newval = newval[(newval < phimax + 1e-9) & (newval > -phimax - 1e-9)]
	keep = np.concatenate(([True], np.diff(newval) > 1e-9))
	newval = newval[keep]
	# Distances to each candidate image; the original value gets a slight
	# 'bonus' (1e-9) so that ties resolve towards the identity.
	dists = np.array((
		np.abs(newval[:, np.newaxis] - arr[np.newaxis, :]) - 1e-9,
		np.abs(newval[:, np.newaxis] + arr[np.newaxis, :]),
		np.abs(newval[:, np.newaxis] + arr[np.newaxis, :] - phimax),
		np.abs(newval[:, np.newaxis] - arr[np.newaxis, :] + phimax)))[which]
	mapping = np.argmin(np.amin(dists, axis = 0), axis = 1)
	return newval, mapping
+
def linear_integration_element(xval, dx = None, xmin = None, xmax = None, fullcircle = True):
	"""Integration elements for a linearly spaced grid.

	Arguments:
	xval         Float or array/list. If a float, return the size of the
	             single integration element [xval - dx/2, xval + dx/2],
	             clipped at xmin and xmax. If a list/array, return the sizes
	             of all intervals between consecutive midpoints.
	dx           Float or None. Width of the integration element. Used only
	             if xval is a float.
	xmin, xmax   Float or None. Lower/upper bound of the integration
	             interval. Used only if xval is a float.
	fullcircle   True or False. If True, interpret the axis as angular and
	             scale by 2 pi / (xmax - xmin).

	Returns:
	Float or array (like xval)

	Raises:
	ValueError for invalid input combinations (see code).
	"""
	scalar_input = isinstance(xval, (float, np.floating)) and dx is not None
	array_input = isinstance(xval, (np.ndarray, list)) and dx is None
	if scalar_input:
		if fullcircle and (xmin is None or xmax is None):
			raise ValueError("Cannot calculate integration element over full circle if minimum and maximum are not given")
		mult = (2. * np.pi) / (xmax - xmin) if fullcircle else 1.0
		if xmin is not None and xval < xmin - 0.5 * dx:
			return 0
		if xmin is not None and xval < xmin + 0.5 * dx:
			# First element: only half the interval lies inside [xmin, xmax].
			return mult * dx / 2
		if xmax is not None and xval > xmax + 0.5 * dx:
			return 0
		if xmax is not None and xval > xmax - 0.5 * dx:
			# Last element: only half the interval lies inside [xmin, xmax].
			return mult * dx / 2
		return mult * dx
	if array_input:
		if xmin is not None or xmax is not None:
			sys.stderr.write("Warning (linear_integration_element): Arguments xmin and xmax are ignored.\n")
		xval = np.asarray(xval)
		lo = xval.min()
		hi = xval.max()
		# Bin boundaries: endpoints plus midpoints between consecutive values.
		xbins = np.concatenate(([lo], 0.5 * (xval[1:] + xval[:-1]), [hi]))
		mult = (2. * np.pi) / (hi - lo) if fullcircle else 1.0
		return (xbins[1:] - xbins[:-1]) * mult
	raise ValueError("Illegal combination of inputs")
+
def quadratic_integration_element(kval, dk = None, kmax = None):
	"""Integration elements, quadratic
	Returns the area of the rings between radii [kval - dk/2, kval + dk/2] with
	a lower radius of >= 0 and an upper radius of <= kmax.

	Arguments:
	kval   Float or array/list. If a float, return the size of the single
	       integration element [kval - dk/2, kval + dk/2]. If an array or list,
	       return the sizes of all intervals.
	dk     Float or None. Size of the integration element; used only when kval
	       is a float.
	kmax   Float or None. If not None, the maximum value of the integration
	       interval. Used only if kval is a float.

	Returns:
	Float or array (matching the type of kval)
	"""
	if isinstance(kval, (float, np.floating)) and dk is not None:
		halfdk = 0.5 * dk
		if kval < halfdk:
			# Innermost element: disc of radius dk/2, divided by 2 pi
			return dk**2 / 8
		if kmax is not None:
			if kval > kmax + halfdk:
				return 0
			if kval > kmax - halfdk:
				# Element partially beyond kmax: clip the outer radius at kmax
				return 0.5 * (kmax**2 - (kval - halfdk)**2)
		return kval * dk
	elif isinstance(kval, (np.ndarray, list)) and dk is None:
		if kmax is not None:
			sys.stderr.write("Warning (quadratic_integration_element): Argument kmax is ignored.\n")
		karr = np.asarray(kval)
		# Bin edges: midpoints between neighbours, clipped to [min, max]
		edges = np.concatenate(([karr.min()], 0.5 * (karr[1:] + karr[:-1]), [karr.max()]))
		return 0.5 * (edges[1:]**2 - edges[:-1]**2)
	else:
		raise ValueError("Illegal combination of inputs")
+
def circular_integration_element(kval, dk = None, kmax = None, full = True):
	"""Integration elements, circular extension of one-dimensional array
	Wrapper around quadratic_integration_element that handles the extension of a
	one-dimensional array to the full circle. If this extension is requested
	(full = True), multiply by the correct angular volume element. See also
	documentation for quadratic_integration_element().

	Arguments:
	kval   Float or array/list. If a float, calculate the size of the
	       integration element [kval - dk/2, kval + dk/2]. If an array or list,
	       return the sizes of the intervals.
	dk     Float or None. If set, size of the integration element. Used only if
	       kval is a float.
	kmax   Float or None. If not None, the maximum value of the integration
	       interval. Used only if kval is a float.
	full   True or False. Whether to extend to a full circle.

	Returns:
	Float or array, or None if the integration element is ill-defined.
	"""
	dk2 = quadratic_integration_element(kval, dk, kmax)
	# Use an array view for the symmetry analysis below, so that plain lists
	# and single floats are handled too (the original argument is passed
	# through to quadratic_integration_element() unchanged).
	karr = np.atleast_1d(np.asarray(kval, dtype = float))
	if not full:
		phimult = 1.0
	elif karr.min() < -1e-8:
		# A two-sided array must be symmetric around 0; each point then
		# accounts for half a circle only.
		if np.amax(np.abs(karr + karr[::-1])) < 1e-8:
			phimult = np.pi
		else:
			sys.stderr.write("ERROR (circular_integration_element): One-dimensional array is two-sided and not symmetric. Integration element is ill-defined in this case.\n")
			return None
	else:
		phimult = 2.0 * np.pi
	return np.abs(dk2) * phimult
+
+class Vector:
+	"""Vector object
+
+	Attributes:
+	value     Float or tuple. The vector component(s).
+	vtype     String. The vector type, which defines the parametrization of the
+	          vector. Is one of: 'x', 'y', 'z', 'xy', 'xyz', 'pol', 'cyl',
+	          'sph'.
+	degrees   True, False or None. Whether angular units are degrees (True) or
+	          radians (False). None means unknown or undefined.
+	aunit     Float or None. Multiplier for angular coordinates. This is pi/180
+	          for degrees, 1 for radians, and None if the angular unit is
	          unknown.
+	"""
+	def __init__(self, *val, astype = None, deg = None):
+		if len(val) == 1 and isinstance(val[0], tuple):
+			val = val[0]
+		self.degrees = None
+		if len(val) == 1 and isrealnum(val[0]):
+			self.value = val
+			if astype in ['x', 'y', 'z']:
+				self.vtype = astype
+			elif astype is None:
+				self.vtype = 'x'
+			else:
+				raise ValueError("Invalid vector type")
+		elif len(val) == 2 and isrealnum(val[0]) and isrealnum(val[1]):
+			if astype == 'pol':
+				self.value = val
+				self.degrees = degrees_by_default if deg is None else deg
+			elif astype in ['cyl', 'sph']:
+				self.value = (val[0], val[1], 0.0)
+				self.degrees = degrees_by_default if deg is None else deg
+			elif astype == 'xyz':
+				self.value = (val[0], val[1], 0.0)
+			elif astype == 'xy' or astype is None:
+				self.value = val
+			else:
+				raise ValueError("Invalid vector type")
+			self.vtype = 'xy' if astype is None else astype
+		elif len(val) == 3 and isrealnum(val[0]) and isrealnum(val[1]):
+			if isrealnum(val[2]):
+				if astype in ['cyl', 'sph']:
+					self.value = val
+					self.degrees = degrees_by_default if deg is None else deg
+				elif astype == 'xyz' or astype is None:
+					self.value = val
+				else:
+					raise ValueError("Invalid vector type")
+				self.vtype = 'xyz' if astype is None else astype
+			elif val[2] in ['deg', 'rad']:
+				if astype in ['cyl', 'sph']:
+					self.value = (val[0], val[1], 0.0)
+				elif astype == 'pol' or astype is None:
+					self.value = (val[0], val[1])
+				else:
+					raise ValueError("Invalid vector type")
+				self.degrees = (val[2] == 'deg')
+				if deg is not None and self.degrees != deg:
+					sys.stderr.write("Warning (Vector): deg keyword is ignored\n")
+				self.vtype = 'pol' if astype is None else astype
+			else:
+				raise ValueError("Invalid vector input")
+		elif len(val) == 4 and isrealnum(val[0]) and isrealnum(val[1]) and val[2] in ['deg', 'rad'] and isrealnum(val[3]):
+			if astype == 'cyl' or astype is None:
+				self.value = val
+			else:
+				raise ValueError("Invalid vector type")
+			self.degrees = (val[2] == 'deg')
+			self.vtype = 'cyl'
+		elif len(val) == 5 and isrealnum(val[0]) and isrealnum(val[1]) and val[2] in ['deg', 'rad'] and isrealnum(val[3]) and val[4] in ['deg', 'rad']:
+			if val[2] != val[4]:
+				raise ValueError("Invalid vector input: deg and rad cannot be mixed")
+			if astype == 'sph' or astype is None:
+				self.value = val
+			else:
+				raise ValueError("Invalid vector type")
+			self.degrees = (val[2] == 'deg')
+			if deg is not None and self.degrees != deg:
+				sys.stderr.write("Warning (Vector): deg keyword is ignored\n")
+			self.vtype = 'cyl'
+		else:
+			raise ValueError("Invalid vector input. Valid formats: (x), (x,y), (x,y,z),(r,phi,'deg'), (r,phi,'deg',z), (r,theta,'deg',phi,'deg'), where 'deg' may be replaced by 'rad'.")
+		self.aunit = None if self.degrees is None else pi / 180. if self.degrees else 1.0  # angle unit
+
+	# component functions
+	def len(self, square = False):
+		"""Length (magnitude) of the vector.
+
+		Argument:
+		square    True or False. If True, return the squared value.
+		"""
+		if self.vtype in ['x', 'y', 'z', 'pol', 'cyl', 'sph']:
+			return self.value[0]**2 if square else abs(self.value[0])
+		elif self.vtype == 'xy':
+			r2 = self.value[0]**2 + self.value[1]**2
+			return r2 if square else np.sqrt(r2)
+		elif self.vtype == 'xyz':
+			r2 = self.value[0]**2 + self.value[1]**2 + self.value[2]**2
+			return r2 if square else np.sqrt(r2)
+		else:
+			raise TypeError
+
	def __abs__(self):
		"""Return the magnitude |v|; equivalent to self.len()."""
		return self.len()
+
+	def x(self):
+		"""Get the x component"""
+		if self.vtype in ['y', 'z']:
+			return 0.0
+		elif self.vtype in ['x', 'xy', 'xyz']:
+			return self.value[0]
+		elif self.vtype in ['pol', 'cyl']:
+			return self.value[0] * cos(self.aunit * self.value[1])  # r cos(phi)
+		elif self.vtype == 'sph':
+			return self.value[0] * sin(self.aunit * self.value[1]) * cos(self.aunit * self.value[2])  # r sin(theta) cos(phi)
+		else:
+			raise TypeError
+
+	def y(self):
+		"""Get the y component"""
+		if self.vtype in ['x', 'z']:
+			return 0.0
+		elif self.vtype == 'y':
+			return self.value[0]
+		elif self.vtype in ['xy', 'xyz']:
+			return self.value[1]
+		elif self.vtype in ['pol', 'cyl']:
+			return self.value[0] * sin(self.aunit * self.value[1])  # r sin(phi)
+		elif self.vtype == 'sph':
+			return self.value[0] * sin(self.aunit * self.value[1]) * sin(self.aunit * self.value[2])  # r sin(theta) sin(phi)
+		else:
+			raise TypeError
+
+	def z(self):
+		"""Get the z component"""
+		if self.vtype in ['x', 'y', 'xy', 'pol']:
+			return 0.0
+		elif self.vtype == 'z':
+			return self.value[0]
+		elif self.vtype in ['xyz', 'cyl']:
+			return self.value[2]
+		elif self.vtype == 'sph':
+			return self.value[0] * cos(self.aunit * self.value[1])  # r cos(theta)
+		else:
+			raise TypeError
+
+	def xy(self):
+		"""Get the x and y component (as tuple)"""
+		if self.vtype == 'z':
+			return (0.0, 0.0)
+		elif self.vtype == 'x':
+			return (self.value[0], 0.0)
+		elif self.vtype == 'y':
+			return (0.0, self.value[0])
+		elif self.vtype == 'xy':
+			return self.value
+		elif self.vtype == 'xyz':
+			return (self.value[0], self.value[1])
+		elif self.vtype in ['pol', 'cyl']:
+			return (self.value[0] * cos(self.aunit * self.value[1]), self.value[0] * sin(self.aunit * self.value[1]))  # r cos(phi), r sin(phi)
+		elif self.vtype == 'sph':
+			return (self.value[0] * sin(self.aunit * self.value[1]) * cos(self.aunit * self.value[2]), self.value[0] * sin(self.aunit * self.value[1]) * sin(self.aunit * self.value[2]))  # r sin(theta) cos(phi), r sin(theta) sin(phi)
+		else:
+			raise TypeError
+
+	def xyz(self):
+		"""Get the x, y, and z component (as tuple)"""
+		if self.vtype == 'x':
+			return (self.value[0], 0.0, 0.0)
+		elif self.vtype == 'y':
+			return (0.0, self.value[0], 0.0)
+		elif self.vtype == 'z':
+			return (0.0, 0.0, self.value[0])
+		elif self.vtype == 'xy':
+			return (self.value[0], self.value[1], 0.0)
+		elif self.vtype == 'xyz':
+			return self.value
+		elif self.vtype == 'pol':
+			return (self.value[0] * cos(self.aunit * self.value[1]), self.value[0] * sin(self.aunit * self.value[1]), 0.0)  # r cos(phi), r sin(phi), 0
+		elif self.vtype == 'cyl':
+			return (self.value[0] * cos(self.aunit * self.value[1]), self.value[0] * sin(self.aunit * self.value[1]), self.value[2])  # r cos(phi), r sin(phi), z
+		elif self.vtype == 'sph':
+			return (self.value[0] * sin(self.aunit * self.value[1]) * cos(self.aunit * self.value[2]), self.value[0] * sin(self.aunit * self.value[1]) * sin(self.aunit * self.value[2]), self.value[0] * cos(self.aunit * self.value[1]))  # r sin(theta) cos(phi), r sin(theta) sin(phi), r cos(theta)
+		else:
+			raise TypeError
+
+	def pm(self):
+		"""Get x + i y and x - i y (as tuple)"""
+		x, y = self.xy()
+		return x + 1.j * y, x - 1.j * y
+
+	def pmz(self):
+		"""Get x + i y, x - i y, and z (as tuple)"""
+		x, y, z = self.xyz()
+		return x + 1.j * y, x - 1.j * y, z
+
	def polar(self, deg = True, fold = True):
		"""Get polar coordinates r and phi (as tuple)

		Arguments:
		deg    True or False. Whether the return value of phi should be in
		       degrees (True) or radians (False).
		fold   True or False. Whether to use folding. See polar_fold().

		Returns:
		r, phi   Floats. Polar coordinates
		"""
		# A pure z vector has no in-plane part; return the origin directly
		# (note: without applying polar_fold()).
		if self.vtype == 'z':
			return (0.0, 0.0)
		if self.vtype in ['x', 'y', 'xy', 'xyz']:
			# Cartesian types: convert (x, y); to_polar() returns phi in the
			# unit requested by deg.
			x, y = self.xy()
			r, phi = to_polar(x, y, deg)
		elif self.vtype in ['pol', 'cyl']:
			r, phi = self.value[0], self.value[1]
		elif self.vtype == 'sph':
			r, phi = self.value[0] * sin(self.value[1] * self.aunit), self.value[2]  # r_xy, phi = r sin(theta), phi

		# For angular types, phi is stored in this vector's own unit; rescale
		# if it differs from the requested output unit.
		if self.vtype in ['pol', 'cyl', 'sph']:
			if deg and not self.degrees:
				phi *= 180. / pi
			elif not deg and self.degrees:
				phi *= pi / 180.

		return polar_fold(r, phi, deg, fold)
+
	def cylindrical(self, deg = True, fold = True):
		"""Get cylindrical coordinates r, phi and z (as tuple)

		Arguments:
		deg    True or False. Whether the return value of phi should be in
		       degrees (True) or radians (False).
		fold   True or False. Whether to use folding. See polar_fold().

		Returns:
		r, phi, z   Floats. Cylindrical coordinates
		"""
		if self.vtype in ['x', 'y', 'z', 'xy', 'xyz']:
			# Cartesian types: in-plane part to polar, z taken as is.
			x, y, z = self.xyz()
			r, phi = to_polar(x, y, deg)
		elif self.vtype == 'pol':
			r, phi, z = self.value[0], self.value[1], 0.0
		elif self.vtype == 'cyl':
			r, phi, z = self.value
		elif self.vtype == 'sph':
			r, phi, z = self.value[0] * sin(self.value[1] * self.aunit), self.value[2], self.value[0] * cos(self.value[1] * self.aunit)  # r_xy, phi, z = r sin(theta), phi, r cos(theta)

		# For angular types, phi is stored in this vector's own unit; rescale
		# if it differs from the requested output unit.
		if self.vtype in ['pol', 'cyl', 'sph']:
			if deg and not self.degrees:
				phi *= 180. / pi
			elif not deg and self.degrees:
				phi *= pi / 180.
		# Folding acts on the in-plane part only; z is unaffected.
		r, phi = polar_fold(r, phi, deg, fold)
		return r, phi, z
+
	def spherical(self, deg = True, fold = True):
		"""Get spherical coordinates r, theta and phi (as tuple)

		Arguments:
		deg    True or False. Whether the return value of phi should be in
		       degrees (True) or radians (False).
		fold   True or False. Whether to use folding. See spherical_fold().

		Returns:
		r, theta, phi    Floats. Spherical coordinates.
		"""
		if self.vtype in ['x', 'y', 'z', 'xy', 'xyz']:
			# Cartesian types: to_spherical() returns angles in the unit
			# requested by deg.
			x, y, z = self.xyz()
			r, theta, phi = to_spherical(x, y, z, deg)
		elif self.vtype == 'pol':
			# Polar vectors lie in the xy plane, hence theta = 90 degrees.
			r, phi = self.value
			theta = 90. if deg else pi / 2.
			if deg and not self.degrees:  # we only need to rescale phi, not theta
				phi *= 180. / pi
			elif not deg and self.degrees:
				phi *= pi / 180.
		elif self.vtype == 'cyl':
			rxy, phi, z = self.value
			r = sqrt(rxy**2 + z**2)
			# Handle the poles and the equator explicitly, to avoid division
			# by zero (r == 0) and rounding issues in acos().
			if rxy == 0 and z >= 0:
				theta = 0.0
			elif rxy == 0.0 and z < 0:
				theta = 180. if deg else pi
			elif z == 0:
				theta = 90. if deg else pi / 2.
			else:
				theta = acos(z / r) * 180. / pi if deg else acos(z / r)
			if deg and not self.degrees:  # we only need to rescale phi, not theta
				phi *= 180. / pi
			elif not deg and self.degrees:
				phi *= pi / 180.
		elif self.vtype == 'sph':
			r, theta, phi = self.value
			if deg and not self.degrees:  # we rescale phi and theta
				phi *= 180. / pi
				theta *= 180. / pi
			elif not deg and self.degrees:
				phi *= pi / 180.
				theta *= pi / 180.

		return spherical_fold(r, theta, phi, deg, fold)
+
+	def component(self, comp, prefix = ''):
+		"""Get component value.
+
+		Argument:
+		comp    String. Which component to return.
+		prefix  String that matches the first part of the input comp, for
+		        example comp = 'kphi', prefix = 'k' is a valid input.
+
+		Returns:
+		A float. The value of the component.
+		"""
+		if comp is None or comp in [prefix, prefix + 'r']:
+			if self.vtype in ['pol', 'cyl', 'sph']:
+				return self.value[0]
+			else:
+				return self.len()
+		elif comp == prefix + 'x':
+			return self.x()
+		elif comp == prefix + 'y':
+			return self.y()
+		elif comp == prefix + 'z':
+			return self.z()
+		elif comp == prefix + 'phi':
+			if self.vtype == 'sph':
+				phi = self.value[2]
+			elif self.vtype == 'pol':
+				phi = self.value[1]
+			else:
+				_, phi = self.polar(deg = self.degrees, fold = None)
+			return phi
+		elif comp == prefix + 'theta':
+			_, theta, _ = self.spherical(deg = self.degrees, fold = None)
+			return theta
+		else:
+			raise ValueError("Invalid vector component")
+
+	def components(self, prefix = ''):
+		"""Get natural components depending on vector type.
+
+		Argument:
+		prefix   String that is prepended to the return value.
+
+		Returns:
+		List of strings.
+		"""
+		if self.vtype in ['x', 'y', 'z']:
+			return [prefix + self.vtype]
+		elif self.vtype == 'xy':
+			return [prefix + 'x', prefix + 'y']
+		elif self.vtype == 'xyz':
+			return [prefix + 'x', prefix + 'y', prefix + 'z']
+		elif self.vtype == 'pol':
+			return ['r' if prefix == '' else prefix, prefix + 'phi']
+		elif self.vtype == 'cyl':
+			return ['r' if prefix == '' else prefix, prefix + 'phi', prefix + 'z']
+		elif self.vtype == 'sph':
+			return ['r' if prefix == '' else prefix, prefix + 'theta', prefix + 'phi']
+		else:
+			raise TypeError
+
+	def to_dict(self, prefix = '', all_components = False):
+		"""Return a dict with components and values
+
+		Argument:
+		prefix           String that is prepended to the return value.
+		all_components   True or False. If True, give all components x, y, z,
+		                 phi, and theta, as well as len and abs. If False, give
+		                 the appropriate components for the vtype only.
+
+		Returns:
+		vdict   A dict instance, with vector components as keys.
+		"""
+		vdict = {}
+		if all_components:
+			for co in ['x', 'y', 'z', 'phi', 'theta']:
+				vdict[prefix + co] = self.component(co)
+			vdict[prefix + "len"] = self.len()
+			vdict[prefix + "abs"] = self.__abs__()  # in fact, identical result to len
+		else:
+			for co, val in zip(self.components(prefix = prefix), self.value):
+				vdict[co] = val
+		return vdict
+
+	def get_pname_pval(self, prefix = ''):
+		"""Return variable name and value for plot parameter text
+		Either a single component like 'kx = 0.1' or a tuple for multiple
+		components like '(kx, ky) = (0.1, 0.2)'.
+		"""
+		comp = self.components(prefix = prefix)
+		if len(self.value) == 1:
+			return comp[0], self.value[0]
+		else:
+			return tuple(comp), tuple(self.value)
+
+	def set_component(self, comp, val = None, prefix = '', inplace = True):
+		"""Set specific labelled component(s).
+
+		Arguments:
+		comp, val  Component and value. Can be one of the following
+		           combinations. If None, None, do nothing. If comp is a dict
+		           and val is None, set values according to the dict. (This must
+		           be of the form {component: value}, where component is a
+		           string, like 'x' and value a number. If comp is a string and
+		           val a number, set that component to that value. If comp is a
+		           list/tuple of strings and val is a list/tuple of number, set
+		           the components to the respective values.
+		prefix     Prefix for vector components, e.g., 'k'.
+		inplace    True or False. If True, return the present Vector instance.
+		           If False, return a new instance.
+
+		Returns:
+		The present or a new Vector instance.
+		"""
+		if comp is None and val is None:
+			return self
+		elif isinstance(comp, dict) and val is None:
+			comp_dict = comp
+		elif isinstance(comp, str) and isrealnum(val):
+			comp_dict = {comp: val}
+		elif isinstance(comp, (list, tuple)) and isinstance(val, (list, tuple)) and len(comp) == len(val):
+			comp_dict = {c: v for c, v in zip(comp, val)}
+		else:
+			raise TypeError("Illegal combination of arguments comp and val.")
+
+		value = [v for v in self.value]
+		# For debugging:
+		# print ("Comp", comp_dict)
+		for c in comp_dict:
+			if c not in self.components():
+				raise ValueError("Invalid vector component '%s' for vector type '%s'" % (c, self.vtype))
+			if c in ['x', 'r', '']:
+				value[0] = comp_dict[c]
+			elif c == 'y' or c == 'theta':
+				value[1] = comp_dict[c]
+			elif c == 'z':
+				value[2] = comp_dict[c]
+			elif c == 'phi' and self.vtype in ['pol', 'cyl']:
+				value[1] = comp_dict[c]
+			elif c == 'phi' and self.vtype == 'sph':
+				value[2] = comp_dict[c]
+			else:
+				raise ValueError
+		if inplace:
+			self.value = value
+			return self
+		else:
+			return Vector(value, astype = self.vtype, deg = self.degrees)
+
	def astype(self, astype, inplace = False, deg = None, fold = True, force = False):
		"""Convert Vector to the given vector type.

		Arguments:
		astype   String. Target vector type; one of 'x', 'y', 'z', 'xy',
		         'xyz', 'pol', 'cyl', 'sph'.
		inplace  True or False. If True, return the present Vector instance. If
		         False, return a new instance.
		deg      True, False, or None. Whether the values of the angles in the
		         target vector should be in degrees (True) or radians (False).
		         If None, use the default.
		fold     True or False. Whether to use folding for angular vector types.
		force    True or False. If True, generate a new vector even if the
		         target vector type is the same as that of the present instance.
		         For angular types, this may involve folding or unfolding. If
		         False, return the same vector if the vector types are the same.

		Returns:
		The present or a new Vector instance.
		"""
		if astype == self.vtype and not force:
			newvalue = self.value  # no conversion needed; reuse the stored values
		elif astype == 'x':
			newvalue = self.x()
		elif astype == 'y':
			newvalue = self.y()
		elif astype == 'z':
			newvalue = self.z()
		elif astype == 'xy':
			newvalue = self.xy()
		elif astype == 'xyz':
			newvalue = self.xyz()
		elif astype == 'pol':
			newvalue = self.polar(deg = deg, fold = fold)
		elif astype == 'cyl':
			newvalue = self.cylindrical(deg = deg, fold = fold)
		elif astype == 'sph':
			newvalue = self.spherical(deg = deg, fold = fold)
		else:
			raise TypeError("Invalid vector type")
		if inplace:
			self.value = newvalue
			self.vtype = astype
			# NOTE(review): with deg = None this resets degrees to the module
			# default for angular types, even if the type did not change --
			# confirm intended.
			if self.vtype in ['pol', 'cyl', 'sph']:
				self.degrees = degrees_by_default if deg is None else deg
			else:
				self.degrees = None
			self.aunit = None if self.degrees is None else pi / 180. if self.degrees else 1.0  # angle unit
			return self
		else:
			return Vector(newvalue, astype = astype, deg = deg)
+
+	def reflect(self, axis = None, inplace = False, deg = None, fold = True):
+		"""Reflect Vector to the given vector type.
+
+		Arguments:
+		axis     String or None. The axis/axes along which to reflect; one of
+		         '', 'x', 'y', 'z', 'xy', 'xz', 'yz', 'xyz'. The empty string is
+		         equivalent to the identity transformation. None is equivalent
+		         to 'xyz', which is an overall sign flip.
+		inplace  True or False. If True, return the present Vector instance. If
+		         False, return a new instance.
+		deg      True, False, or None. Whether the values of the angles in the
+		         target vector should be in degrees (True) or radians (False).
+		         If None, use the default.
+		fold     True or False. Whether to use folding for angular vector types.
+
+		Returns:
+		The present or a new Vector instance.
+		"""
+		if deg is None:
+			deg = self.degrees
+		# Default axis (None) is equivalent to 'xyz'
+		if axis is None:
+			axis = 'xyz'
+		elif axis in ['xz', 'yz']:
+			return self.reflect('z', inplace = inplace, deg = deg, fold = fold).reflect(axis[0], inplace = inplace, deg = deg, fold = fold)  # composition xz or yz
+		elif axis not in ['', 'x', 'y', 'z', 'xy', 'xyz']:
+			raise ValueError("Invalid axis")
+		if axis == '':  # do nothing
+			newvalue = self.value
+		elif self.vtype == 'x':
+			newvalue = (-self.value[0]) if 'x' in axis else (self.value[0],)
+		elif self.vtype == 'y':
+			newvalue = (-self.value[0]) if 'y' in axis else (self.value[0],)
+		elif self.vtype == 'z':
+			newvalue = (-self.value[0]) if 'z' in axis else (self.value[0],)
+		elif self.vtype == 'xy':
+			x, y = self.xy()
+			x1 = -x if 'x' in axis else x
+			y1 = -y if 'y' in axis else y
+			newvalue = (x1, y1)
+		elif self.vtype == 'xyz':
+			x, y, z = self.xyz()
+			x1 = -x if 'x' in axis else x
+			y1 = -y if 'y' in axis else y
+			z1 = -z if 'z' in axis else z
+			newvalue = (x1, y1, z1)
+		elif self.vtype == 'pol':
+			r, phi = self.polar(deg = deg, fold = fold)
+			if axis == 'xy' or axis == 'xyz':
+				newvalue = polar_fold(-r, phi, deg = deg, fold = fold)
+			elif axis == 'x':
+				phi0 = 180. if deg else np.pi
+				newvalue = polar_fold(r, phi0 - phi, deg = deg, fold = fold)
+			elif axis == 'y':
+				newvalue = polar_fold(r, -phi, deg = deg, fold = fold)
+			elif axis == 'z':
+				newvalue = (r, phi)
+		elif self.vtype == 'cyl':
+			r, phi, z = self.cylindrical(deg = deg, fold = fold)
+			if axis == 'xy' or axis == 'xyz':
+				r, phi = polar_fold(-r, phi, deg = deg, fold = fold)
+			elif axis == 'x':
+				phi0 = 180. if deg else np.pi
+				r, phi = polar_fold(r, phi0 - phi, deg = deg, fold = fold)
+			elif axis == 'y':
+				r, phi = polar_fold(r, -phi, deg = deg, fold = fold)
+			if 'z' in axis:
+				z = -z
+			newvalue = (r, phi, z)
+		elif self.vtype == 'sph':
+			r, theta, phi = self.spherical(deg = deg, fold = fold)
+			if axis == 'xyz':
+				r, theta, phi = spherical_fold(-r, theta, phi, deg = deg, fold = fold)
+			elif axis == 'xy':  # composition of xyz and z; other representations possible
+				theta0 = 180. if deg else np.pi
+				r, theta, phi = spherical_fold(-r, theta0 - theta, phi, deg = deg, fold = fold)
+			elif axis == 'x':
+				phi0 = 180. if deg else np.pi
+				r, theta, phi = spherical_fold(r, theta, phi0 - phi, deg = deg, fold = fold)
+			elif axis == 'y':
+				r, theta, phi = spherical_fold(r, theta, -phi, deg = deg, fold = fold)
+			elif axis == 'z':
+				theta0 = 180. if deg else np.pi
+				r, theta, phi = spherical_fold(r, theta0 - theta, phi, deg = deg, fold = fold)
+			newvalue = (r, theta, phi)
+		else:
+			raise TypeError("Invalid vector type")
+		if inplace:
+			self.value = newvalue
+			if self.vtype in ['pol', 'cyl', 'sph']:
+				self.degrees = degrees_by_default if deg is None else deg
+			else:
+				self.degrees = None
+			self.aunit = None if self.degrees is None else pi / 180. if self.degrees else 1.0  # angle unit
+			return self
+		else:
+			return Vector(newvalue, astype = self.vtype, deg = deg)
+
	def __neg__(self):
		"""Unary minus: the point reflection -v.
		The same as self.reflect('xyz'); reflect() defaults to axis 'xyz'.
		"""
		return self.reflect()
+
+	def diff(self, other, square = False):
+		"""Distance between two vectors |v1 - v2|.
+
+		Arguments:
+		other    Vector instance or zero (0 or 0.0). The second vector. Zero
+		         means the zero vector.
+		square   True or False. If True, return |v1 - v2|^2 instead.
+
+		Returns:
+		A float.
+		"""
+		x1, y1, z1 = self.xyz()
+		if isinstance(other, Vector):
+			x2, y2, z2 = other.xyz()
+		elif other == 0.0:
+			x2, y2, z2 = 0.0, 0.0, 0.0
+		else:
+			raise TypeError("Comparison must be with another Vector object or 0.")
+		sqdiff = (x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2
+		return sqdiff if square else np.sqrt(sqdiff)
+
	def __sub__(self, other):
		"""Alias for vector difference, |v1 - v2|.
		Note that this returns a float (the distance), not a Vector.
		"""
		return self.diff(other)
+
+	# equality, inequality, identity
+	def equal(self, other, acc = 1e-9):
+		"""Test vector equality v1 == v2.
+		Equality means that the two instances refer to the same point in (1-,
+		2-, or 3-dimensional) space. The representations (vector types and
+		values) need not be identical.
+
+		Arguments:
+		other    Vector instance or zero (0 or 0.0). The second vector. Zero
+		         means the zero vector.
+		acc      Float. The maximum Euclidean difference for the vectors to be
+		         considered equal. Default value is 1e-9.
+
+		Returns:
+		True or False.
+		"""
+		x1, y1, z1 = self.xyz()
+		if isinstance(other, Vector):
+			x2, y2, z2 = other.xyz()
+		elif other == 0.0:
+			x2, y2, z2 = 0.0, 0.0, 0.0
+		else:
+			raise TypeError("Comparison must be with another Vector object or 0.")
+		return abs(x1 - x2) < acc and abs(y1 - y2) < acc and abs(z1 - z2) < acc
+
+	def zero(self, acc = 1e-9):
+		"""Test whether vector equals zero vector.
+
+		Arguments:
+		acc      Float. The maximum length for the vector to be considered zero.
+		         Default value is 1e-9.
+
+		Returns:
+		True or False.
+		"""
+		return self.len(square = True) < acc**2
+
	def __eq__(self, other):
		"""Test equality with other Vector instance or zero.
		Equality is geometric (same point in space); see equal().
		"""
		return self.zero() if other == 0.0 else self.equal(other)
+
	def __ne__(self, other):
		"""Test inequality with other Vector instance or zero.
		Logical negation of __eq__().
		"""
		return (not self.zero()) if other == 0.0 else (not self.equal(other))
+
+	def identical(self, other, acc = 1e-9):
+		"""Test vector identity v1 === v2.
+		Identity means that the two instances have the same vector type and have
+		the same values.
+
+		Arguments:
+		other    Vector instance. The second vector.
+		acc      Float. The maximum absolute for the values to be considered
+		         equal. Default value is 1e-9.
+
+		Returns:
+		True or False.
+		"""
+		if isinstance(other, Vector):
+			if self.vtype != other.vtype:
+				return False
+			return all([abs(vi - wi) < acc for vi, wi in zip(self.value, other.value)])
+		else:
+			raise TypeError("Comparison must be with another Vector object.")
+
+	def parallel(self, other, acc = 1e-9):
+		"""Test whether two vectors are parallel.
+		Do so by calculation the cross product. This is equal to zero if and
+		only if the vectors are parallel.
+
+		Arguments:
+		other    Vector instance or zero. The second vector. If zero, interpret
+		         as the zero vector. Then the result is always True.
+		acc      Float. The maximum length difference of the cross product for
+		         it to be considered zero. Default value is 1e-9.
+
+		Returns:
+		True or False.
+		"""
+		if isinstance(other, Vector):
+			if self.zero() or other.zero():
+				return True
+			else:
+				x1, y1, z1 = self.xyz()
+				x2, y2, z2 = other.xyz()
+				xo, yo, zo = y1 * z2 - z1 * y2, z1 * x2 - x1 * z2, x1 * y2 - y1 * x2  # outer product
+				return abs(xo) < acc and abs(yo) < acc and abs(zo) < acc
+		else:
+			raise TypeError("Comparison must be with another Vector object.")
+
+	def perpendicular(self, other, acc = 1e-9):
+		"""Test whether two vectors are perpendicular.
+		Do so by calculation the inner product. This is equal to zero if and
+		only if the vectors are perpendicular.
+
+		Arguments:
+		other    Vector instance or zero. The second vector. If zero, interpret
+		         as the zero vector. Then the result is always True.
+		acc      Float. The maximum length difference of the cross product for
+		         it to be considered zero. Default value is 1e-9.
+
+		Returns:
+		True or False.
+		"""
+		if isinstance(other, Vector):
+			if self.zero() or other.zero():
+				return True
+			else:
+				x1, y1, z1 = self.xyz()
+				x2, y2, z2 = other.xyz()
+				ip = x1 * x2 + y1 * y2 + z1 * z2  # inner product
+				return abs(ip) < acc
+		else:
+			raise TypeError("Comparison must be with another Vector object.")
+
+	def __str__(self, formatstr='%6.3f'):
+		"""String representation"""
+		try:
+			if self.vtype in ['x', 'y', 'z']:
+				return formatstr % self.value
+			elif self.vtype == 'xy':
+				return ("(" + formatstr + ", " + formatstr + ")") % self.value
+			elif self.vtype == 'xyz':
+				return ("(" + formatstr + ", " + formatstr + ", " + formatstr + ")") % self.value
+			elif self.vtype == 'pol':
+				return (("(" + formatstr + ", %s)") % (self.value[0], degstr(self.value[1]))) if self.degrees else (("(" + formatstr + ", " + formatstr + " rad)") % self.value)
+			elif self.vtype == 'cyl':
+				return (("(" + formatstr + ", %s, " + formatstr + ")") % (self.value[0], degstr(self.value[1]), self.value[2])) if self.degrees else (("(" + formatstr + ", " + formatstr + " rad, " + formatstr + ")") % self.value)
+			elif self.vtype == 'sph':
+				return (("(" + formatstr + ", %s, %s)") % (self.value[0], degstr(self.value[1]), degstr(self.value[2]))) if self.degrees else (("(" + formatstr + ", " + formatstr + " rad, " + formatstr + " rad)") % self.value)
+			else:
+				raise TypeError("Invalid Vector type")
+		except:
+			raise ValueError("Error printing Vector")
+
	def __repr__(self):
		"""Return str(self); Vector has no eval-able representation."""
		return str(self)
+
+	def xmlattr(self, prefix = ''):
+		"""XML output (attributes and values)
+
+		Attributes:
+		prefix   String that is prepended to the vector components to form the
+		         attributes.
+
+		Returns:
+		A dict of the form {attribute: value, ...}, where attribute is the
+		XML attribute for an XML <vector> tag or similar.
+		"""
+		attr = {}
+		if self.vtype in ['x', 'y', 'z']:
+			attr[prefix + self.vtype] = self.value[0]
+		elif self.vtype == 'xy':
+			attr[prefix + 'x'] = self.value[0]
+			attr[prefix + 'y'] = self.value[1]
+		elif self.vtype == 'xyz':
+			attr[prefix + 'x'] = self.value[0]
+			attr[prefix + 'y'] = self.value[1]
+			attr[prefix + 'z'] = self.value[2]
+		elif self.vtype == 'pol':
+			if len(prefix) == 0:
+				attr['r'] = self.value[0]
+			else:
+				attr[prefix + ''] = self.value[0]
+			attr[prefix + 'phi'] = self.value[1]
+			x, y = self.xy()
+			attr[prefix + 'x'] = x
+			attr[prefix + 'y'] = y
+		elif self.vtype == 'cyl':
+			if len(prefix) == 0:
+				attr['r'] = self.value[0]
+			else:
+				attr[prefix + ''] = self.value[0]
+			attr[prefix + 'phi'] = self.value[1]
+			x, y, z = self.xyz()
+			attr[prefix + 'x'] = x
+			attr[prefix + 'y'] = y
+			attr[prefix + 'z'] = z
+		elif self.vtype == 'sph':
+			if len(prefix) == 0:
+				attr['r'] = self.value[0]
+			else:
+				attr[prefix + ''] = self.value[0]
+			attr[prefix + 'theta'] = self.value[1]
+			attr[prefix + 'phi'] = self.value[2]
+			x, y, z = self.xyz()
+			attr[prefix + 'x'] = x
+			attr[prefix + 'y'] = y
+			attr[prefix + 'z'] = z
+		else:
+			raise TypeError
+		if self.vtype in ['pol', 'cyl', 'sph']:
+			attr['angleunit'] = 'deg' if self.degrees else 'rad'
+		return attr
+
+	# legacy function
+	def to_tuple(self):
+		if self.vtype in ['x', 'z']:
+			return self.value[0]
+		elif self.vtype in ['xy', 'xyz']:
+			return self.value
+		elif self.vtype == 'pol':
+			return (self.value[0], self.value[1], 'deg' if self.degrees else 'rad')
+		elif self.vtype in ['y', 'cyl', 'sph']:
+			sys.stderr.write("Warning (Vector.to_tuple): Backconversion not possible for type '%s'.\n" % self.vtype)
+			return None
+		else:
+			raise TypeError
+
def is_diagonal(m, acc = 1e-9):
	"""Test if a matrix/array is diagonal.

	Arguments:
	m     Numpy array. Must be 2-dim and square (and nonempty) to qualify.
	acc   Float. Off-diagonal entries smaller than this value (in absolute
	      value) are treated as zero.

	Returns:
	True or False.
	"""
	if m.ndim != 2 or m.shape[0] != m.shape[1] or m.shape[0] == 0:
		return False
	off_diagonal = m - np.diag(np.diagonal(m))
	return np.amax(np.abs(off_diagonal)) < acc
+
class VectorTransformation(object):
	"""Vector transformation object.
	This defines a linear transformation on cartesian, cylindrical and spherical
	coordinates. For cartesian coordinates, this is just a matrix multiplication
	by a matrix M, i.e., v -> M v. For cylindrical and spherical coordinates,
	the angles may need to be shifted, so that an affine transformation is
	required, i.e., v -> M v + u, where M is a matrix and u is a vector.

	A VectorTransformation instance is used to apply a transformation to either
	a Vector or a VectorGrid instance.

	Attributes:
	name       String. A label.
	mat_cart   Numpy array of shape (3, 3). Transformation matrix M in cartesian
	           coordinates (vector representation).
	mat_cyl    Numpy array of shape (3, 3) or None. Transformation matrix M in
	           cylindrical coordinates.
	mat_sph    Numpy array of shape (3, 3) or None. Transformation matrix M in
	           spherical coordinates.
	delta_cyl  Numpy array of length 3. Vector shift u for cylindrical
	           transformation. The angular component is stored in degrees.
	delta_sph  Numpy array of length 3. Vector shift u for spherical
	           transformation. The angular components are stored in degrees.
	mat_e      Numpy array of shape (2, 2). Transformation matrix M in the E
	           representation.
	a2g        Float, either 1.0 or -1.0. Transformation in the A2g
	           representation of Oh.
	"""
	def __init__(self, name, mat_cart, mat_cyl, mat_sph, delta_cyl = None, delta_sph = None, mat_e = None, a2g = None):
		self.name = name
		# Length-3 inputs are interpreted as the diagonal of a 3x3 matrix.
		self.mat_cart = np.array(mat_cart)
		self.mat_cart = np.diag(self.mat_cart) if self.mat_cart.ndim == 1 else self.mat_cart
		if mat_cyl is None:
			self.mat_cyl = None
		else:
			self.mat_cyl = np.array(mat_cyl)
			self.mat_cyl = np.diag(self.mat_cyl) if self.mat_cyl.ndim == 1 else self.mat_cyl
		if mat_sph is None:
			self.mat_sph = None
		else:
			self.mat_sph = np.array(mat_sph)
			self.mat_sph = np.diag(self.mat_sph) if self.mat_sph.ndim == 1 else self.mat_sph
		for m in [self.mat_cart, self.mat_cyl, self.mat_sph]:
			if isinstance(m, np.ndarray) and m.shape != (3, 3):
				raise ValueError("Inputs must be 3x3 matrices or length-3 arrays.")
		self.delta_cyl = np.array([0., 0., 0.]) if delta_cyl is None else np.array(delta_cyl)
		self.delta_sph = np.array([0., 0., 0.]) if delta_sph is None else np.array(delta_sph)
		if self.delta_cyl.shape != (3,) or self.delta_sph.shape != (3,):
			raise ValueError("Input arguments 'delta_cyl' and 'delta_sph' must be length-3 arrays or None.")
		if mat_e is None:
			# Derive the E-representation matrix from the cartesian matrix.
			m = self.mat_cart
			s3 = np.sqrt(3)
			self.mat_e = np.array([
				[0.5 * (m[0, 0]**2 - m[1, 0]**2) - 0.5 * (m[0, 1]**2 - m[1, 1]**2), 0.5 * s3 * (m[0, 2]**2 - m[1, 2]**2)],
				[0.5 * s3 * (m[2, 0]**2 - m[2, 1]**2), 1.5 * m[2, 2]**2 - 0.5]
			])  # TODO: Check this!
		else:
			self.mat_e = np.array(mat_e)
			self.mat_e = np.diag(self.mat_e) if self.mat_e.ndim == 1 else self.mat_e
			if self.mat_e.shape != (2, 2):
				raise ValueError("Argument mat_e must be None or an array of shape (2,) or (2, 2).")
		if a2g == -1.0 or a2g == 1.0:
			self.a2g = float(a2g)
		elif a2g is None:
			self.a2g = 1.0
		else:
			raise TypeError("Argument a2g must have the value -1 or 1, or be a 1x1 array with one of these values.")

	def grid_safe(self, vtype, var):
		"""Test whether the transformation is 'grid safe' for a specific vector type.
		Grid safe means that the result of the transformation can again be
		written as a grid of the same type. For example, a rotation about a
		generic angle (not a multiple of 90 degrees) is not 'grid safe' for a
		cartesian grid.

		Arguments:
		vtype   String. Vector type.
		var     String or list of strings. For cartesian grids, which are the
		        variable (non-constant) components of the grid.

		Returns:
		True or False.

		Raises:
		ValueError for an invalid vtype.
		"""
		if isinstance(var, str):
			var = [var]
		if vtype in ['x', 'y', 'z', 'xy', 'xyz']:
			coord = np.array(['x' in var, 'y' in var, 'z' in var])
			m = 1 * self.mat_cart[coord][:, coord]  # multiply by 1 to force a copy
			m[np.abs(m) < 1e-9] = 0
			# Each transformed axis must be parallel to exactly one grid axis
			for v in m:
				if np.count_nonzero(v) != 1:
					return False
			mh_m = np.dot(np.transpose(np.conjugate(m)), m)
			return is_diagonal(mh_m)
		elif vtype == 'sph':
			if self.mat_sph is None:
				return False
			coord = np.array(['r' in var, 'theta' in var, 'phi' in var])
			return is_diagonal(self.mat_sph[coord][:, coord])
		elif vtype in ['pol', 'cyl']:
			if self.mat_cyl is None:
				return False
			coord = np.array(['r' in var, 'phi' in var, 'z' in var])
			return is_diagonal(self.mat_cyl[coord][:, coord])
		else:
			# Bug fix: this was 'return ValueError(...)', which returned the
			# (truthy) exception instance instead of raising it.
			raise ValueError("Invalid vtype")

	def __call__(self, v, fold = True):
		"""Apply transformation to Vector or VectorGrid.

		Arguments:
		v     Vector or VectorGrid instance.
		fold  True or False. Whether to use folding for angular vector types.

		Returns:
		A new Vector or VectorGrid instance. For a VectorGrid argument, None if
		the transformation does not preserve the grid.
		"""
		newvtype = v.vtype
		if isinstance(v, Vector):
			if v.vtype in ['x', 'y', 'z', 'xy', 'xyz']:
				vec = v.xyz()
				newvec = np.dot(self.mat_cart, vec)
				if v.vtype != 'xyz':
					# Reduce the result to the components of the input type
					newvec = [newvec[0]] if v.vtype == 'x' else [newvec[1]] if v.vtype == 'y' else [newvec[2]] if v.vtype == 'z' else newvec[0:2]
			elif v.vtype == 'pol':
				if self.mat_cyl is None:
					# No cylindrical representation: fall back to cartesian
					newvec = np.dot(self.mat_cart, np.array(v.xyz()))
					newvtype = 'xyz'
				else:
					vec = np.concatenate((v.value, [0]))
					# delta_cyl stores angles in degrees; rescale the angular
					# shift when the vector uses radians
					delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1.])
					newvec = (np.dot(self.mat_cyl, vec) + delta_mult * self.delta_cyl)[0:2]
			elif v.vtype == 'cyl':
				if self.mat_cyl is None:
					newvec = np.dot(self.mat_cart, np.array(v.xyz()))
					newvtype = 'xyz'
				else:
					vec = v.value
					delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1.])
					newvec = np.dot(self.mat_cyl, vec) + delta_mult * self.delta_cyl
			elif v.vtype == 'sph':
				if self.mat_sph is None:
					newvec = np.dot(self.mat_cart, np.array(v.xyz()))
					newvtype = 'xyz'
				else:
					vec = v.value
					delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1. if v.degrees else pi / 180])
					newvec = np.dot(self.mat_sph, vec) + delta_mult * self.delta_sph
			else:
				raise ValueError("Invalid vector type")
			out_v = Vector(*newvec, astype = newvtype, deg = v.degrees)
			if fold:
				out_v.astype(v.vtype, inplace = True, deg = v.degrees, fold = True, force = True)
			elif newvtype != v.vtype:
				out_v.astype(v.vtype, inplace = True, deg = v.degrees, fold = False, force = False)
			return out_v
		elif isinstance(v, VectorGrid):
			if not self.grid_safe(v.vtype, v.var):
				sys.stderr.write("Warning (VectorTransformation): Transformation does not preserve grid.\n")
				return None
			if v.vtype == 'x':
				# Bug fix: np.dot returns an ndarray, which has no method x();
				# transform the cartesian components of each vector and take
				# the relevant component of the result by index.
				new_val = [np.dot(self.mat_cart, val.xyz())[0] for val in v]
				return VectorGrid('x', new_val, astype = 'x', prefix = v.prefix)
			elif v.vtype == 'y':
				new_val = [np.dot(self.mat_cart, val.xyz())[1] for val in v]
				return VectorGrid('y', new_val, astype = 'y', prefix = v.prefix)
			elif v.vtype == 'z':
				new_val = [np.dot(self.mat_cart, val.xyz())[2] for val in v]
				return VectorGrid('z', new_val, astype = 'z', prefix = v.prefix)
			elif v.vtype == 'xy':
				new_val = np.array([np.dot(self.mat_cart, vec.xyz()) for vec in v])
				new_val_u = [np.unique(x) for x in new_val.transpose()]
				return VectorGrid('x', new_val_u[0], 'y', new_val_u[1], astype = 'xy', prefix = v.prefix)
			elif v.vtype == 'xyz':
				new_val = np.array([np.dot(self.mat_cart, vec.xyz()) for vec in v])
				new_val_u = [np.unique(x) for x in new_val.transpose()]
				return VectorGrid('x', new_val_u[0], 'y', new_val_u[1], 'z', new_val_u[2], astype = 'xyz', prefix = v.prefix)
			elif v.vtype == 'pol':
				delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1.])
				delta = delta_mult * self.delta_cyl
				new_val = np.array([np.dot(self.mat_cyl, vec.polar(deg = v.degrees, fold = False) + (0,)) for vec in v]) + delta[np.newaxis, :]
				new_val_u = [np.unique(x) for x in new_val.transpose()]
				return VectorGrid('r', new_val_u[0], 'phi', new_val_u[1], astype = 'pol', deg = v.degrees, prefix = v.prefix)
			elif v.vtype == 'cyl':
				delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1.])
				delta = delta_mult * self.delta_cyl
				new_val = np.array([np.dot(self.mat_cyl, vec.cylindrical(deg = v.degrees, fold = False)) for vec in v]) + delta[np.newaxis, :]
				new_val_u = [np.unique(x) for x in new_val.transpose()]
				return VectorGrid('r', new_val_u[0], 'phi', new_val_u[1], 'z', new_val_u[2], astype = 'cyl', deg = v.degrees, prefix = v.prefix)
			elif v.vtype == 'sph':
				delta_mult = np.array([1., 1. if v.degrees else pi / 180, 1. if v.degrees else pi / 180])
				delta = delta_mult * self.delta_sph
				new_val = np.array([np.dot(self.mat_sph, vec.spherical(deg = v.degrees, fold = False)) for vec in v]) + delta[np.newaxis, :]
				new_val_u = [np.unique(x) for x in new_val.transpose()]
				return VectorGrid('r', new_val_u[0], 'theta', new_val_u[1], 'phi', new_val_u[2], astype = 'sph', deg = v.degrees, prefix = v.prefix)
			else:
				raise ValueError("Invalid vector type")
		else:
			raise TypeError("Argument v must be a Vector or VectorGrid instance.")

	def transform(self, rep, values):
		"""Apply representation action.

		Arguments:
		rep      String. The representation label.
		values   Float or numpy array. The value or vector that the
		         representation acts on.

		Returns:
		Float or numpy array, like argument values.

		Raises:
		ValueError for an unknown representation label.
		"""
		rep_l = rep.lower()  # hoisted: compare the lowercase label only once
		if rep_l in ['a1', 'a1g', 'triv']:
			return values
		elif rep_l in ['a2', 'a1u', 'parity']:
			return self.det() * values
		elif rep_l in ['a2g']:
			return self.a2g * values
		elif rep_l in ['a2u']:
			return self.a2g * self.det() * values
		elif rep_l in ['t1', 't1g', 'axial']:
			return self.det() * np.dot(self.mat_cart, values)
		elif rep_l in ['t2', 't1u', 'vector']:
			return np.dot(self.mat_cart, values)
		elif rep_l in ['t2g']:
			return self.a2g * self.det() * np.dot(self.mat_cart, values)
		elif rep_l in ['t2u']:
			return self.a2g * np.dot(self.mat_cart, values)
		elif rep_l in ['e', 'eg']:
			return np.dot(self.mat_e, values)
		elif rep_l in ['eu']:
			return self.det() * np.dot(self.mat_e, values)
		else:
			raise ValueError("Invalid representation")

	def __mul__(self, other):
		"""Multiply two VectorTransformation instances (composition self after other)"""
		new_name = self.name + '*' + other.name
		new_mat_cart = np.dot(self.mat_cart, other.mat_cart)
		if self.mat_cyl is None or other.mat_cyl is None:
			new_mat_cyl = None
			new_delta_cyl = None
		else:
			new_mat_cyl = np.dot(self.mat_cyl, other.mat_cyl)
			# Composition of affine maps: u = M1 u2 + u1
			new_delta_cyl = np.dot(self.mat_cyl, other.delta_cyl) + self.delta_cyl
		if self.mat_sph is None or other.mat_sph is None:
			new_mat_sph = None
			new_delta_sph = None
		else:
			new_mat_sph = np.dot(self.mat_sph, other.mat_sph)
			new_delta_sph = np.dot(self.mat_sph, other.delta_sph) + self.delta_sph
		new_mat_e = np.dot(self.mat_e, other.mat_e)
		new_a2g = self.a2g * other.a2g
		return VectorTransformation(new_name, new_mat_cart, new_mat_cyl, new_mat_sph, delta_cyl = new_delta_cyl, delta_sph = new_delta_sph, mat_e = new_mat_e, a2g = new_a2g)

	def inv(self):
		"""Get the inverse transformation"""
		# Derive the label: toggle a trailing superscript -1 / +/- sign
		new_name = self.name[:-2] if self.name.endswith('\u207b\xb9') else self.name[:-1] + '\u207a' if self.name.endswith('\u207b') else self.name[:-1] + '\u207b' if self.name.endswith('\u207a') else self.name[:-1] + '+' if self.name.endswith('-') else self.name[:-1] + '-' if self.name.endswith('+') else self.name + '\u207b\xb9'
		new_mat_cart = np.linalg.inv(self.mat_cart)
		if self.mat_cyl is None:
			new_mat_cyl = None
			new_delta_cyl = None
		else:
			new_mat_cyl = np.linalg.inv(self.mat_cyl)
			# Inverse of an affine map: u' = -M^-1 u
			new_delta_cyl = -np.dot(new_mat_cyl, self.delta_cyl)
		if self.mat_sph is None:
			new_mat_sph = None
			new_delta_sph = None
		else:
			new_mat_sph = np.linalg.inv(self.mat_sph)
			new_delta_sph = -np.dot(new_mat_sph, self.delta_sph)
		new_mat_e = np.linalg.inv(self.mat_e)
		return VectorTransformation(new_name, new_mat_cart, new_mat_cyl, new_mat_sph, delta_cyl = new_delta_cyl, delta_sph = new_delta_sph, mat_e = new_mat_e, a2g = self.a2g)

	def det(self):
		"""Get the determinant (of the cartesian transformation matrix)"""
		return np.linalg.det(self.mat_cart)

	def __str__(self):
		"""String representation"""
		return ("<Vector transformation %s>" % self.name)
+
+
### VECTOR TRANSFORMATION DEFINITIONS ###
# Cosine and sine of 2 pi / 3 (120 degrees), used by the trigonal elements
_c3 = np.cos(2 * pi / 3)
_s3 = np.sin(2 * pi / 3)
# Identity and inversion
vt_1 = VectorTransformation('1', [1, 1, 1], [1, 1, 1], [1, 1, 1])
vt_i = VectorTransformation('i', [-1, -1, -1], [1, 1, -1], [1, -1, 1], delta_cyl = [0, 180, 0], delta_sph = [0, 180, 180])
# Twofold rotation about z and mirror in the z = 0 plane
vt_2z = VectorTransformation('2(z)', [-1, -1, 1], [1, 1, 1], [1, 1, 1], delta_cyl = [0, 180, 0], delta_sph = [0, 0, 180])
vt_mz = VectorTransformation('m(z)', [1, 1, -1], [1, 1, -1], [1, -1, 1], delta_cyl = [0, 0, 0], delta_sph = [0, 180, 0])
# Threefold rotation about z; cylindrical/spherical angle shift of 120 degrees
vt_3z = VectorTransformation('3(z)', [[_c3, -_s3, 0], [_s3, _c3, 0], [0, 0, 1]], [1, 1, 1], [1, 1, 1], delta_cyl = [0, 120, 0], delta_sph = [0, 0, 120], mat_e = [[_c3, -_s3], [_s3, _c3]])
# Threefold rotations about the four body diagonals (a-d); no cylindrical or
# spherical representation (mat_cyl and mat_sph are None)
vt_3a = VectorTransformation('3(a)', [[0, 0, -1], [-1, 0, 0], [0,  1, 0]], None, None)
vt_3b = VectorTransformation('3(b)', [[0, 0,  1], [-1, 0, 0], [0, -1, 0]], None, None)
vt_3c = VectorTransformation('3(c)', [[0, 0, -1], [ 1, 0, 0], [0, -1, 0]], None, None)
vt_3d = VectorTransformation('3(d)', [[0, 0,  1], [ 1, 0, 0], [0,  1, 0]], None, None)
# Improper threefold rotation about z
vt_m3z = VectorTransformation('-3(z)', [[_c3, -_s3, 0], [_s3, _c3, 0], [0, 0, -1]], [1, 1, -1], [1, -1, 1], delta_cyl = [0, 120, 0], delta_sph = [0, 180, 120], mat_e = [[_c3, -_s3], [_s3, _c3]])
# Fourfold (proper and improper) rotations about z; odd in the A2g representation
vt_4z = VectorTransformation('4(z)', [[0, 1, 0], [-1, 0, 0], [0, 0, 1]], [ 1, 1, 1], [ 1, 1, 1], delta_cyl = [0, 90, 0], delta_sph = [0, 0, 90], a2g = -1)
vt_m4z = VectorTransformation('-4(z)', [[0, 1, 0], [-1, 0, 0], [0, 0, -1]], [ 1, 1, -1], [ 1, -1, 1], delta_cyl = [0, 90, 0], delta_sph = [0, 180, 90], a2g = -1)
# Mirrors in the x = 0 and y = 0 planes
vt_mx = VectorTransformation('m(x)', [-1, 1, 1], [1, -1, 1], [1, 1, -1], delta_cyl = [0, 180, 0], delta_sph = [0, 0, 180])
vt_my = VectorTransformation('m(y)', [1, -1, 1], [1, -1, 1], [1, 1, -1], delta_cyl = [0, 0, 0], delta_sph = [0, 0, 0])
# Mirrors in the vertical hexagonal planes (t, u, v, w)
vt_mt = VectorTransformation('m(t)', [[-_c3,  _s3, 0], [ _s3,  _c3, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, 60, 0], delta_sph = [0, 0, 60], mat_e = [[-_c3, _s3], [_s3, _c3]])
vt_mu = VectorTransformation('m(u)', [[ _c3, -_s3, 0], [-_s3, -_c3, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, -120, 0], delta_sph = [0, 0, -120], mat_e = [[_c3, -_s3], [-_s3, -_c3]])
vt_mv = VectorTransformation('m(v)', [[-_c3, -_s3, 0], [-_s3,  _c3, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, -60, 0], delta_sph = [0, 0, -60], mat_e = [[-_c3, -_s3], [-_s3, _c3]])
vt_mw = VectorTransformation('m(w)', [[ _c3,  _s3, 0], [ _s3, -_c3, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, 120, 0], delta_sph = [0, 0, 120], mat_e = [[_c3, _s3], [_s3, -_c3]])
# Mirrors in the diagonal planes x + y = 0 and x - y = 0
vt_mxpy = VectorTransformation('m(x+y)', [[0, 1, 0], [1, 0, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, 90, 0], delta_sph = [0, 0, 90], a2g = -1)
vt_mxmy = VectorTransformation('m(x-y)', [[0, -1, 0], [-1, 0, 0], [0, 0, 1]], [1, -1, 1], [1, 1, -1], delta_cyl = [0, -90, 0], delta_sph = [0, 0, -90], a2g = -1)
# Twofold rotations about in-plane axes
vt_2x = VectorTransformation('2(x)', [1, -1, -1], [1, -1, -1], [1, -1, -1], delta_cyl = [0, 0, 0], delta_sph = [0, 180, 0])
vt_2y = VectorTransformation('2(y)', [-1, 1, -1], [1, -1, -1], [1, -1, -1], delta_cyl = [0, 180, 0], delta_sph = [0, 180, 180])
vt_2xpy = VectorTransformation('2(x+y)', [[0, 1, 0], [1, 0, 0], [0, 0, -1]], [1, -1, -1], [1, -1, -1], delta_cyl = [0, 90, 0], delta_sph = [0, 180, 90], a2g = -1)
vt_2xmy = VectorTransformation('2(x-y)', [[0, -1, 0], [-1, 0, 0], [0, 0, -1]], [1, -1, -1], [1, -1, -1], delta_cyl = [0, -90, 0], delta_sph = [0, 180, -90], a2g = -1)
# All predefined transformations; searched by get_vectortransformation()
all_vectrans = [vt_1, vt_i, vt_2z, vt_mz, vt_3z, vt_m3z, vt_3a, vt_3b, vt_3c, vt_3d, vt_4z, vt_m4z, vt_mx, vt_my, vt_mt, vt_mu, vt_mv, vt_mw, vt_mxpy, vt_mxmy, vt_2x, vt_2y, vt_2xpy, vt_2xmy]
+
+
def get_vectortransformation(name):
	"""Get vector transformation by name/label.

	Arguments:
	name   String. The label of one of the predefined transformations (see
	       all_vectrans), or 'all'.

	Returns:
	A VectorTransformation instance, or the list of all predefined instances
	if name is 'all'.

	Raises:
	IndexError if no transformation with the given label exists.
	"""
	if name == 'all':
		return all_vectrans
	for vt in all_vectrans:
		if vt.name == name:
			return vt
	# Keep raising IndexError (callers may catch it), but add a diagnostic
	# message; previously this was a bare 'raise IndexError'.
	raise IndexError("Unknown vector transformation '%s'" % name)
+
def vector_from_attr(attr, prefix = '', deg = True):
	"""Get Vector instance from XML attributes

	Arguments:
	attr     A dict instance of the form {attribute: value, ...}.
	prefix   String. Vector prefix common to all of its components.
	deg      True or False. Whether the angular unit of the output vector should
	         be degrees (True) or radians (False).

	Returns:
	A Vector instance.
	"""
	def has(*comps):
		"""Test whether all prefixed components are present in attr"""
		return all(prefix + c in attr for c in comps)
	def val(comp):
		"""Get the prefixed component from attr as float"""
		return float(attr[prefix + comp])
	# The order of these tests matters: more specific component combinations
	# must be tried before less specific ones.
	if has('', 'phi', 'theta'):
		return Vector(val(''), val('theta'), val('phi'), astype = 'sph', deg = deg)
	if has('', 'phi', 'z'):
		return Vector(val(''), val('phi'), val('z'), astype = 'cyl', deg = deg)
	if has('', 'phi'):
		return Vector(val(''), val('phi'), astype = 'pol', deg = deg)
	if has('', 'theta'):
		return Vector(val(''), val('theta'), 0.0, astype = 'sph', deg = deg)
	if has('x', 'y', 'z'):
		return Vector(val('x'), val('y'), val('z'), astype = 'xyz')
	if has('x', 'y'):
		return Vector(val('x'), val('y'), astype = 'xy')
	if has('x', 'z'):
		return Vector(val('x'), 0.0, val('z'), astype = 'xyz')
	if has('y', 'z'):
		return Vector(0.0, val('y'), val('z'), astype = 'xyz')
	if has('x'):
		return Vector(val('x'), astype = 'x')
	if has('y'):
		return Vector(val('y'), astype = 'y')
	if has('z'):
		return Vector(val('z'), astype = 'z')
	if has(''):
		return Vector(val(''), 0.0, astype = 'pol', deg = deg)
	raise ValueError("Illegal combination of components")
+
+class VectorGrid:
+	"""Container class for vector grids.
+	Vector grids are defined in terms of their components, which may be variable
+	(multiple components) or constant.
+
+	Example:
+	  VectorGrid('x', [0, 1], 'y', 1, 'z', [2, 3, 4])
+	contains the vectors (in cartesian notation)
+	  (0, 1, 2), (0, 1, 3), (0, 1, 4), (1, 1, 2), (1, 1, 3),  (1, 1, 4).
+	Here, 'x' and 'z' are the variable components and 'y' is a constant
+	component.
+
+	Attributes:
+	var          List of strings. The variable components.
+	values       List of arrays. The values for the variable components.
+	const        List of strings. The constant components.
+	constvalues  List of floats. The values for the constant components.
+	vtype        String. The vector type, which defines the parametrization of
+	             the vector. Is one of: 'x', 'y', 'z', 'xy', 'xyz', 'pol',
+	             'cyl', 'sph'.
+	degrees      True, False or None. Whether angular units are degrees (True)
+	             or radians (False). None means unknown or undefined.
	shape        Tuple of integers. Shape of the resulting grid.
+	ndim         Integer. Number of variable components.
+	prefix       String. Common prefix for vector components.
+	"""
+	def __init__(self, *args, astype = None, deg = None, prefix = None):
+		self.vtype = astype
+		if self.vtype in ['pol', 'cyl', 'sph']:
+			self.degrees = degrees_by_default if deg is None else deg
+		elif self.vtype in ['x', 'y', 'z', 'xy', 'xyz']:
+			self.degrees = None
+		else:
+			raise ValueError("Invalid vector type")
+
+		if prefix is None:
+			self.prefix = ''
+		elif isinstance(prefix, str):
+			self.prefix = prefix
+		else:
+			raise TypeError("Prefix must be a string")
+
+		if len(args) % 2 != 0:
+			raise ValueError("Invalid number of inputs")
+		self.var = []
+		self.values = []
+		self.const = []
+		self.constvalues = []
+		self.shape = []
+		self.ndim = 0
+		for j in range(0, len(args), 2):
+			var = args[j]
+			val = args[j+1]
+			if not isinstance(var, str):
+				raise TypeError("Invalid variable")
+			if prefix is not None and var.startswith(prefix):
+				var = "".join(var.split(prefix)[1:])
+			if var == '':
+				var = 'r'
+			if isrealnum(val):
+				self.const.append(var)
+				self.constvalues.append(val)
+			elif isinstance(val, list) or (isinstance(val, np.ndarray) and val.ndim == 1):
+				if len(val) == 1:
+					self.const.append(var)
+					self.constvalues.append(val[0])
+				else:
+					self.var.append(var)
+					self.values.append(np.array(val))
+					self.ndim += 1
+					self.shape.append(len(val))
+			else:
+				raise TypeError("Invalid value")
+		allvar = self.var + self.const
+		if self.vtype in ['x', 'y', 'z']:
+			if len(allvar) != 1 or allvar[0] != self.vtype:
+				raise ValueError("Variable '%s' not valid for vector type '%s'" % (allvar[0], self.vtype))
+		elif self.vtype == 'xy':
+			for i in allvar:
+				if i not in ['x', 'y']:
+					raise ValueError("Variable '%s' not valid for vector type '%s'" % (i, self.vtype))
+			for i in ['x', 'y']:
+				if i not in allvar:
+					self.const.append(i)
+					self.constvalues.append(0.0)
+		elif self.vtype == 'xyz':
+			for i in allvar:
+				if i not in ['x', 'y', 'z']:
+					raise ValueError("Variable '%s' not valid for vector type '%s'" % (i, self.vtype))
+			for i in ['x', 'y', 'z']:
+				if i not in allvar:
+					self.const.append(i)
+					self.constvalues.append(0.0)
+		elif self.vtype == 'pol':
+			for i in allvar:
+				if i not in ['', 'r', 'phi']:
+					raise ValueError("Variable '%s' not valid for vector type '%s'" % (i, self.vtype))
+			if '' not in allvar and 'r' not in allvar:
+				raise ValueError("Variable '' or 'r' required for vector type '%s', but missing" % self.vtype)
+			if 'phi' not in allvar:
+				self.const.append('phi')
+				self.constvalues.append(0.0)
+		elif self.vtype == 'cyl':
+			for i in allvar:
+				if i not in ['', 'r', 'phi', 'z']:
+					raise ValueError("Variable '%s' not valid for vector type '%s'" % (i, self.vtype))
+			if '' not in allvar and 'r' not in allvar:
+				raise ValueError("Variable '' or 'r' required for vector type '%s', but missing" % self.vtype)
+			for i in ['phi', 'z']:
+				if i not in allvar:
+					self.const.append(i)
+					self.constvalues.append(0.0)
+		elif self.vtype == 'sph':
+			for i in allvar:
+				if i not in ['', 'r', 'theta', 'phi']:
+					raise ValueError("Variable '%s' not valid for vector type '%s'" % (i, self.vtype))
+			if '' not in allvar and 'r' not in allvar:
+				raise ValueError("Variable '' or 'r' required for vector type '%s', but missing" % self.vtype)
+			for i in ['theta', 'phi']:
+				if i not in allvar:
+					self.const.append(i)
+					self.constvalues.append(0.0)
+		self.shape = tuple(self.shape)
+
+	def __getitem__(self, idx):
+		"""Get an instance of the (flat) array (argument is int) OR get the grid for a component (argument is str)"""
+		if isinstance(idx, str):
+			return self.get_grid(idx)
+		elif isinstance(idx, (int, np.integer)):
+			# preformance warning: OK for once, avoid calling in sequence
+			flatvalues = [gr.flatten() for gr in self.get_grid()]
+			flatvec = np.array(flatvalues).transpose()
+			return Vector(*(flatvec[idx]), astype = self.vtype, deg = self.degrees)
+		else:
+			raise IndexError
+
+	def get_array(self, comp = None):
+		"""Get array(s), i.e., factorized values
+
+		Argument:
+		comp   String or None. If None, then return a tuple of the values
+		       (arrays) for all variable components. If 'all', return a tuple of
+		       the values of all (including constant) components. If a string
+		       matching a component (e.g., 'x') return the values; this works
+		       for variable and constant components alike.
+		"""
+		if comp is None:
+			return tuple([np.array(val) for val in self.values])
+		elif comp == 'all':
+			return tuple([self.get_array(c) for c in self.get_components()])
+		elif comp in self.var:
+			i = self.var.index(comp)
+			return np.array(self.values[i])
+		elif comp in self.const:
+			i = self.const.index(comp)
+			return np.array([self.constvalues[i]])
+		else:
+			raise KeyError("Component '%s' is not defined" % comp)
+
+	def get_components(self, include_prefix = False):
+		"""Get natural components for the vector type
+
+		Argument:
+		include_prefix  True or False. Whether to append the prefix to the
+		                vector components.
+
+		Returns:
+		List of strings.
+		"""
+		if self.vtype in ['x', 'y', 'z']:
+			components = [self.vtype]
+		elif self.vtype == 'xy':
+			components = ['x', 'y']
+		elif self.vtype == 'xyz':
+			components = ['x', 'y', 'z']
+		elif self.vtype == 'pol':
+			components = ['r', 'phi']
+		elif self.vtype == 'cyl':
+			components = ['r', 'phi', 'z']
+		elif self.vtype == 'sph':
+			components = ['r', 'theta', 'phi']
+		else:
+			raise ValueError("Invalid vtype")
+		if include_prefix:
+			return [self.prefix if c == 'r' else self.prefix + c for c in components]
+		else:
+			return components
+
+	def get_grid(self, comp = None):
+		"""Get grid for one or more components.
+
+		Arguments:
+		comp   String or None. If a string, this must be one of the components
+		       in which the VectorGrid is defined. If None, use the 'natural'
+		       components.
+		"""
+		if isinstance(comp, str):
+			return self.get_array(comp)
+		elif isinstance(comp, list):
+			axisarrays = (self.get_array(c) for c in comp)
+			return np.meshgrid(*axisarrays, indexing = 'ij')
+		elif comp is None:
+			axisarrays = (self.get_array(c) for c in self.get_components())
+			return np.meshgrid(*axisarrays, indexing = 'ij')
+		else:
+			raise TypeError
+
+	def get_values(self, comp, flat = True):
+		"""Get (flat) values for a vector component.
+		Unlike get_grid(), this does not necessarily have to be one of the
+		components in which the VectorGrid is defined.
+
+		Arguments:
+		comp   String. The vector component.
+		flat   True or False. If True, return a one-dimensional array over all
+		       vectors in the grid. If False, return an array the same shape as
+		       the VectorGrid (like self.shape).
+
+		Returns:
+		A numpy array of floats.
+		"""
+		flatcomp = np.array([v.component(comp, prefix = self.prefix) for v in self])
+		return flatcomp if flat else flatcomp.reshape(self.shape)
+
+	def __iter__(self):
+		"""Iterator over flat array; yields Vector instances"""
+		flatvalues = [gr.flatten() for gr in self.get_grid()]
+		flatvec = np.array(flatvalues).transpose()
+		for v in flatvec:
+			yield Vector(*v, astype = self.vtype, deg = self.degrees)
+
+	def __len__(self):
+		"""Get total array size"""
+		size = 1
+		for x in self.shape:
+			size *= x
+		return size
+
+	def subgrid_shapes(self, dim):
+		"""Get total shape of d-dimensional subgrids (d = argument dim)"""
+		if dim == 0 or dim > len(self.shape):
+			return []
+		elif dim == 1:
+			return [(s,) for s in self.shape]
+		else:
+			return list(itertools.combinations(self.shape, dim))
+
+	def __min__(self):
+		"""Get a vector of minimal length (if not unique, return one of them)"""
+		if len(self) == 0:
+			return None
+		vmin, lmin = self[0], self[0].len()
+		for v in self:
+			if v.len() < lmin:
+				vmin = v
+				lmin = v.len()
+		return vmin
+
+	def __max__(self):
+		"""Get a vector of maximal length (if not unique, return one of them)"""
+		if len(self) == 0:
+			return None
+		vmax, lmax = self[0], self[0].len()
+		for v in self:
+			if v.len() > lmax:
+				vmax = v
+				lmax = v.len()
+		return vmax
+
+	def __eq__(self, other):
+		"""Test equality with another VectorGrid instance"""
+		if isinstance(other, VectorGrid):
+			return self.var == other.var and self.const == other.const and \
+				self.vtype == other.vtype and \
+				np.array_equal(self.values, other.values) and \
+				np.array_equal(self.constvalues, other.constvalues)
+		else:
+			# We raise a TypeError exception rather than returning
+			# NotImplemented, because we want to forbid comparisons with numpy
+			# types, which would invoke array expansion because VectorGrid is
+			# iterable.
+			raise TypeError("Comparison must be with another VectorGrid instance")
+
	def index(self, v, flat = True, acc = None, angle_fold = True, fast_method_only = True):
		"""Return index of a given vector. Acts as a 'find' function.

		This function employs two methods: The 'fast method' compares the
		components of the input vector to that of the arrays (variable and
		constant) values of the vector grid. The 'slow method' finds vectors
		by equality (of Vector instances).

		Arguments:
		v                 Vector instance or float.
		flat              True or False. If True, return index in flat array. If
		                  False, return (multi-dimensional) index in the grid.
		acc               Float or None. If float, the maximum difference for
		                  two vectors or values to be considered equal. If None,
		                  find vectors by minimal distance (uses the slow method
		                  only).
		angle_fold        True or False. Whether to permit folding for angular
		                  vector types.
		fast_method_only  True or False. If True, return None if no match could
		                  be found using the fast method. If False, retry using
		                  the slow method.

		Returns:
		An integer (flat = True) or array/tuple of integers (flat = False).
		"""
		if acc is None:
			# Slow method only: index of the vector at minimal difference.
			# NOTE(review): assumes w - v yields a scalar-like quantity that
			# np.argmin can order -- confirm against Vector.__sub__.
			diff = np.array([w - v for w in self])
			idx = np.argmin(diff)
			return idx if flat else np.unravel_index(idx, self.shape)
		elif isinstance(v, Vector) and v.vtype == self.vtype:
			# Fast method: compare the input vector component-wise against the
			# variable and constant components of the grid.
			components = v.components()
			values = [v.value] if not isinstance(v.value, (list, tuple, np.ndarray)) else v.value
			idx = []
			full_angle = 360 if self.degrees else 2 * np.pi
			for co, val in zip(components, values):
				if co in self.const:
					# Constant component: value must agree within accuracy acc
					cval = self.constvalues[self.const.index(co)]
					if abs(cval - val) > acc:
						return None
				elif co in self.var:
					# Variable component: find the closest grid value; angular
					# (phi) components are compared modulo the full angle
					if co.endswith('phi'):
						diff = diff_mod(self.values[self.var.index(co)], val, full_angle)
					else:
						diff = np.abs(self.values[self.var.index(co)] - val)
					idx1 = np.argmin(diff)
					if diff[idx1] < acc:
						idx.append(idx1)
					else:
						break
				else:
					break
			if len(idx) == len(self.var):
				# A match was found for each variable component
				return np.ravel_multi_index(idx, self.shape) if flat else tuple(idx)
			elif angle_fold and v.vtype == 'pol':
				# Retry once with the equivalent folded vector (-r, phi + half turn)
				r, phi = v.value
				v1 = Vector(-r, phi + full_angle / 2, astype = 'pol', deg = self.degrees)
				return self.index(v1, flat = flat, acc = acc, angle_fold = False)
			elif angle_fold and v.vtype == 'cyl':
				# Retry once with the equivalent folded vector (-r, phi + half turn, z)
				r, phi, z = v.value
				v1 = Vector(-r, phi + full_angle / 2, z, astype = 'cyl', deg = self.degrees)
				return self.index(v1, flat = flat, acc = acc, angle_fold = False)
			elif angle_fold and v.vtype == 'sph':
				# Retry once with the equivalent folded vector
				# (-r, half turn - theta, phi + half turn)
				r, theta, phi = v.value
				v1 = Vector(-r, full_angle / 2 - theta, phi + full_angle / 2, astype = 'sph', deg = self.degrees)
				return self.index(v1, flat = flat, acc = acc, angle_fold = False)
			elif fast_method_only:
				return None
			# else: fallthrough to 'slow' method

		# Slow method: index of the closest vector, subject to accuracy acc.
		diff = np.array([w - v for w in self])
		idx = np.argmin(diff)
		if acc is not None and self[idx] - v > acc:
			return None
		return idx if flat else np.unravel_index(idx, self.shape)
+
+	def get_var_const(self, return_tuples = False, use_prefix = True):
+		"""Find variables and constants
+
+		Arguments:
+		use_prefix      True or False. If True (default), add the prefix. If
+		                False, return the bare variable names.
+		return_tuples   True or False. How to handle the return values. If False
+		                (default), then reduce 0-tuple to None and 1-tuple to
+		                its single element. If True, always return tuples.
+
+		Returns:
+		val       Tuple of values (arrays) for variable components.
+		var       Tuple of strings. The variable components.
+		constval  Tuple of floats or None. The constant values. None is returned
+		          when there are no constant values.
+		const     Tuple of strings. The constant components. None is returned
+		          when there are no constant values.
+		"""
+		val = tuple(self.values)
+		constval = tuple(self.constvalues)
+		if use_prefix:
+			var = tuple([add_var_prefix(v, self.prefix) for v in self.var])
+			const = tuple([add_var_prefix(c, self.prefix) for c in self.const])
+		else:
+			var = tuple(self.var)
+			const = tuple(self.const)
+		if return_tuples:
+			return val, var, constval, const
+
+		if len(self.const) == 0:
+			constval, const = None, None
+		elif len(self.const) == 1:
+			constval = self.constvalues[0]
+			const = add_var_prefix(self.const[0], self.prefix) if use_prefix else self.const[0]
+		if len(self.var) == 0:
+			val, var = None, None
+		elif len(self.var) == 1:
+			val = self.values[0]
+			var = add_var_prefix(self.var[0], self.prefix) if use_prefix else self.var[0]
+		return val, var, constval, const
+
+	def select(self, *arg, flat = True, acc = 1e-10, fold = None, deg = None):
+		"""Select certain vectors in the grid.
+		The argument specifies the component values that should match. For
+		example, grid.select('x', 0.1) returns all vectors with component x
+		equal to 0.1.
+
+		Arguments:
+		*arg    What to match. If a dict, it must be of the form {component:
+		        value, ...}. If a string and a value, interpret as single
+		        component and value. If two lists/tuples, interpret as multiple
+		        components and respective values.
+		flat    True or False. If True, return index in flat array. If False,
+		        return (multi-dimensional) index in the grid.
+		acc     Float. The maximum difference for two vectors to be considered
+		        equal.
+		fold    None. Not (yet) implemented.
+		deg     True or False. Whether to interpret input values of angular
+		        components as values in degrees (True) or radians (False).
+
+		Returns:
+		indices  Array of integers (flat = True) or multidimensional array
+		         of multi-indices (flat = False)
+		vectors  List of Vector instances. Only if flat = True.
+		"""
+
+		if len(arg) == 1 and isinstance(arg[0], dict):
+			matchval = arg[0]
+		elif len(arg) == 2 and isinstance(arg[0], str) and isrealnum(arg[1]):
+			matchval = {arg[0]: arg[1]}
+		elif len(arg) == 2 and isinstance(arg[0], (list, tuple)) and isinstance(arg[1], (list, tuple)):
+			matchval = {}
+			for var, val in zip(arg[0], arg[1]):
+				if not isinstance(var, str):
+					raise TypeError("Input must be a list of strings")
+				if not isrealnum(val):
+					raise TypeError("Input must be a list of numerical values")
+				matchval[var] = val
+		else:
+			raise TypeError("Invalid combination of arguments")
+
+		l = len(self)
+		if fold is not None:
+			raise NotImplementedError
+		else:
+			sel = np.ones(l, dtype = bool)
+			for var in matchval:
+				if (var.endswith('phi') or var.endswith('theta')) and deg is not None:
+					if deg and not self.degrees:
+						matchval[var] *= np.pi / 180.
+					elif not deg and self.degrees:
+						matchval[var] *= 180. / np.pi
+				if var in self.const:
+					constval = self.constvalues[self.const.index(var)]
+					if abs(matchval[var] - constval) > acc:
+						sel = np.zeros(l, dtype = bool)
+						break
+				else:
+					values = self.get_values(var, flat = True)
+					sel = sel & (np.abs(values - matchval[var]) < acc)
+		indices = np.arange(0, l)[sel]
+		vectors = [v for v, s in zip(self, sel) if s]
+		if flat:
+			return indices, vectors
+		else:
+			return np.unravel_index(indices, self.shape)
+
+	def subdivide(self, comp, subdivisions, quadratic = None):
+		"""Subdivide the grid
+
+		Arguments:
+		comp          String or None. Which component to subdivide. If the grid
+		              is 1-dimensional, the value None means the only variable
+		              component.
+		subdivisions  Integer. The number of subdivisions, i.e.,
+		              step_new = step_old / subdivisions.
+		quadratic     True, False, or None. Whether the grid is quadratic (True)
+		              or linear (False). If None, determine it automatically.
+
+		Returns:
+		A new VectorGrid instance.
+		"""
+		if comp is None:
+			if len(self.var) != 1:
+				raise ValueError("Component can only be None for 1D grids")
+			comp = self.var[0]
+		elif comp not in self.var:
+			raise ValueError("Only variable components can be subdivided")
+		if not isinstance(subdivisions, (int, np.integer)):
+			raise TypeError("Argument subdivisions should be a positive integer")
+		if subdivisions <= 0:
+			raise ValueError("Argument subdivisions should be strictly positive")
+		if subdivisions == 1:
+			return self
+		j = self.var.index(comp)
+		oldvalues = self.values[j]
+		n = len(oldvalues)
+		if quadratic is None:  # determine quadratic range automatically
+			if n < 3:
+				quadratic = False
+			else:
+				quadratic = (abs((oldvalues[2] - oldvalues[0]) / (oldvalues[1] - oldvalues[0]) - 4.0) < 0.01)
+		if quadratic:
+			oldindex = np.arange(0, n)**2
+			newindex = np.linspace(0, n - 1, (n - 1) * subdivisions + 1)**2
+		else:
+			oldindex = np.arange(0, n)
+			newindex = np.linspace(0, n - 1, (n - 1) * subdivisions + 1)
+		newvalues = np.interp(newindex, oldindex, oldvalues)
+
+		# Construct new VectorGrid
+		newarg = []
+		for var, val in zip(self.var, self.values):
+			newarg.append(var)
+			newarg.append(newvalues if var == comp else val)
+		for const, constval in zip(self.const, self.constvalues):
+			newarg.append(const)
+			newarg.append(constval)
+		return VectorGrid(*tuple(newarg), astype = self.vtype, deg = self.degrees, prefix = self.prefix)
+		# TODO: Subdivisions over multiple variables
+
+	def subdivide_to(self, comp, n_target, quadratic = None):
+		"""Subdivide the grid
+
+		Arguments:
+		comp          String or None. Which component to subdivide. If the grid
+		              is 1-dimensional, the value None means the only variable
+		              component.
+		n_target      Integer. The minimum number of grid points in the new
+		              grid. The new step size is chosen to be commensurate with
+		              the old one.
+		quadratic     True, False, or None. Whether the grid is quadratic (True)
+		              or linear (False). If None, determine it automatically.
+
+		Returns:
+		A new VectorGrid instance.
+		"""
+		if comp is None:
+			if len(self.var) != 1:
+				raise ValueError("Component can only be None for 1D grids")
+			comp = self.var[0]
+		elif comp not in self.var:
+			raise ValueError("Only variable components can be subdivided")
+		j = self.var.index(comp)
+		oldvalues = self.values[j]
+		n = len(oldvalues)
+		if (n_target - 1) % (n - 1) != 0:
+			raise ValueError("Target size is incommensurate with input size")
+		subdivisions = (n_target - 1) // (n - 1)
+		return self.subdivide(comp, subdivisions, quadratic = quadratic)
+
+	def symmetrize(self, axis = None, deg = None):
+		"""Symmetrize the vector grid by applying a transformation.
+
+		Arguments:
+		axis   String or VectorTransformation instance, or None. If a string,
+		       the axis or axes in which to apply reflection. If a
+		       VectorTransformation instance, define new grid points by applying
+		       the transformation to the existing grid. None is equivalent to
+		       'xyz'.
+		deg    True, False, or None. Whether the angular units of the new grid
+		       are degrees (True), radians (False), or the same as the present
+		       instance (None).
+
+		Returns:
+		newgrid    A new VectorGrid instance
+		mapping    If axis is a VectorTransformation instance, then a numpy
+		           array of integers. Set such that mapping[i] = j means that
+		           vector with index i of the present grid maps to vector with
+		           index j of the new grid. If axis is a string, then mapping is
+		           a dict {component: map, ...}, where map is such a mapping as
+		           for axis = VectorTransformation.
+
+		Note:
+		These are essentially two versions of the same version: The 'old style'
+		using reflections and the 'new style' using VectorTransformation.
+		Eventually, we might abandon the 'old style'.
+		"""
+		if deg is None:
+			deg = self.degrees
+		# Default axis (None) is equivalent to 'xyz'
+
+		if isinstance(axis, VectorTransformation):
+			tfm = axis  # TODO: rename variable
+			tgrid = tfm(self)
+			newgrid = self.extend(tgrid).sort()[0]
+			mapping = -np.ones(np.prod(newgrid.shape), dtype = int)
+			for j, v in enumerate(self):
+				i = newgrid.index(v, flat = True, acc = 1e-10)
+				if mapping[i] == -1:
+					mapping[i] = j
+			invtfm = tfm.inv()
+			for i, v in enumerate(newgrid):
+				if mapping[i] == -1:
+					j = self.index(invtfm(v), flat = True, acc = 1e-10)
+					if j is None:
+						sys.stderr.write("ERROR (VectorGrid.symmetrize): Result is not a grid [transformation %s].\n" % (tfm.name))
+						return None, None
+					mapping[i] = j
+			return newgrid, mapping
+		elif axis is None:
+			axis = 'xyz'
+		elif axis not in ['', 'x', 'y', 'z', 'xy', 'xyz']:
+			raise ValueError("Invalid axis")
+		if self.vtype == 'x':
+			newval, xmap = reflect_array(self.get_array('x')) if 'x' in axis else no_reflect_array(self.get_array('x'))
+			newgrid = VectorGrid('x', newval, astype = 'x', deg = deg, prefix = self.prefix)
+			mapping = {'x': xmap}
+		elif self.vtype == 'y':
+			newval, ymap = reflect_array(self.get_array('y')) if 'y' in axis else no_reflect_array(self.get_array('y'))
+			newgrid = VectorGrid('y', newval, astype = 'y', deg = deg, prefix = self.prefix)
+			mapping = {'y': ymap}
+		elif self.vtype == 'z':
+			newval, zmap = reflect_array(self.get_array('z')) if 'z' in axis else no_reflect_array(self.get_array('z'))
+			newgrid = VectorGrid('z', newval, astype = 'z', deg = deg, prefix = self.prefix)
+			mapping = {'z': zmap}
+		elif self.vtype == 'xy':
+			newxval, xmap = reflect_array(self.get_array('x')) if 'x' in axis else no_reflect_array(self.get_array('x'))
+			newyval, ymap = reflect_array(self.get_array('y')) if 'y' in axis else no_reflect_array(self.get_array('y'))
+			newgrid = VectorGrid('x', newxval, 'y', newyval, astype = 'xy', deg = deg, prefix = self.prefix)
+			mapping = {'x': xmap, 'y': ymap}
+		elif self.vtype == 'xyz':
+			newxval, xmap = reflect_array(self.get_array('x')) if 'x' in axis else no_reflect_array(self.get_array('x'))
+			newyval, ymap = reflect_array(self.get_array('y')) if 'y' in axis else no_reflect_array(self.get_array('y'))
+			newzval, zmap = reflect_array(self.get_array('z')) if 'z' in axis else no_reflect_array(self.get_array('z'))
+			newgrid = VectorGrid('x', newxval, 'y', newyval, 'z', newzval, astype = 'xyz', deg = deg, prefix = self.prefix)
+			mapping = {'x': xmap, 'y': ymap, 'z': zmap}
+		elif self.vtype == 'pol':
+			if len(self.get_array('phi')) == 1 and axis in ['xy', 'xyz']:
+				rval, rmap = reflect_array(self.get_array('r'))
+				newphival, phimap = no_reflect_array(self.get_array('phi'))
+			else:
+				rval, rmap = no_reflect_array(self.get_array('r'))
+				newphival, phimap = reflect_angular_array(self.get_array('phi'), axis, self.degrees)
+			newgrid = VectorGrid('r', rval, 'phi', newphival, astype = 'pol', deg = deg, prefix = self.prefix)
+			mapping = {'r': rmap, 'phi': phimap}
+		elif self.vtype == 'cyl':
+			if len(self.get_array('phi')) == 1 and axis in ['xy', 'xyz']:
+				rval, rmap = reflect_array(self.get_array('r'))
+				newphival, phimap = no_reflect_array(self.get_array('phi'))
+			else:
+				rval, rmap = no_reflect_array(self.get_array('r'))
+				newphival, phimap = reflect_angular_array(self.get_array('phi'), axis, self.degrees)
+			newzval, zmap = reflect_array(self.get_array('z')) if 'z' in axis else self.get_array('z')
+			newgrid = VectorGrid('r', rval, 'phi', newphival, 'z', newzval, astype = 'cyl', deg = deg, prefix = self.prefix)
+			mapping = {'r': rmap, 'phi': phimap, 'z': zmap}
+		elif self.vtype == 'sph':
+			if len(self.get_array('phi')) == 1 and len(self.get_array('theta')) == 1 and axis == 'xyz':
+				rval, rmap = reflect_array(self.get_array('r'))
+				newthetaval, thetamap = no_reflect_array(self.get_array('theta'))
+				newphival, phimap = no_reflect_array(self.get_array('phi'))
+			else:
+				rval, rmap = no_reflect_array(self.get_array('r'))
+				newthetaval, thetamap = reflect_array(self.get_array('theta'), offset = 180.0 if self.degrees else np.pi) if 'z' in axis else self.get_array('theta')
+				newphival, phimap = reflect_angular_array(self.get_array('phi'), axis, self.degrees)
+			newgrid = VectorGrid('r', rval, 'theta', newthetaval, 'phi', newphival, astype = 'sph', deg = deg, prefix = self.prefix)
+			mapping = {'r': rmap, 'theta': thetamap, 'phi': phimap}
+		return newgrid, mapping
+
+	def integration_element(self, dk = None, dphi = None, full = True, flat = True):
+		"""Get integration elements.
+		The function applies an appropriate multiplication factor if the input
+		is only	a fraction of the Brillouin zone, e.g., in the first quadrant.
+
+		Arguments:
+		dk    Float or None. Step size in the radial direction.
+		dphi  Float or None. Step size in the angular direction.
+		full  True or False. Whether to extend to a full circle or square, if
+		      the vector grid spans it only partially.
+		flat  True or False. If True, the output array will be one-dimensional.
+		      If False, it will have the same shape as the grid.
+
+		Returns:
+		A numpy array, which may be multi-dimensional if flat is False and if
+		the grid also has this property.
+
+		Note:
+		See linear_integration_element() and quadratic_integration_element() for
+		more details.
+		"""
+		if 'x' in self.var and 'y' in self.var:  # Cartesian
+			xval = self.get_array('x')
+			yval = self.get_array('y')
+			dx = linear_integration_element(xval, fullcircle = False)
+			dy = linear_integration_element(yval, fullcircle = False)
+			mult = 1.0
+			if full and abs(min(xval)) < 1e-9:
+				mult *= 2.0
+			if full and abs(min(yval)) < 1e-9:
+				mult *= 2.0
+			da = np.outer(dx, dy) * mult
+			return da.flatten() if flat else da
+		elif 'x' in self.var:  # 1D, along x
+			xval = self.get_array('x')
+			rmax = np.amax(np.abs(xval))
+			return circular_integration_element(xval, dk, rmax, full = full)
+		elif 'y' in self.var:  # 1D, along y
+			yval = self.get_array('y')
+			rmax = np.amax(np.abs(yval))
+			return circular_integration_element(yval, dk, rmax, full = full)
+		elif 'z' in self.var and len(self.var) == 1:  # 1D, along z
+			zval = self.get_array('z')
+			mult = 1.0
+			# mult = 2.0 if full and abs(min(zval)) < 1e-9 else 1.0
+			return linear_integration_element(zval, fullcircle = False) * mult
+		elif self.vtype == 'pol' and 'phi' in self.var:
+			rval = self.get_array('r')
+			phival = self.get_array('phi')
+			if self.degrees:
+				phival *= np.pi / 180.
+			rmax = np.amax(np.abs(rval))
+			dr2 = quadratic_integration_element(rval, dk, rmax)
+			dphi = linear_integration_element(phival, dphi, phival.min(), phival.max(), full)
+			da = np.outer(dr2, dphi)
+			return da.flatten() if flat else da
+		elif self.vtype == 'pol' and 'phi' not in self.var:
+			rval = self.get_array('r')
+			rmax = np.amax(np.abs(rval))
+			return circular_integration_element(rval, dk, rmax)
+		else:
+			sys.stderr.write("Warning (VectorGrid.integration_element): Not yet implemented for this type (%s) and/or combination of components %s\n" % (self.vtype, tuple(self.var)))
+			return None
+
+	def volume(self, *args, **kwds):
+		"""Return the total volume of the grid
+		This is simply the sum over all integration elements.
+		TODO: Return more accurate values from min and max values of self.var.
+		"""
+		ie = self.integration_element(*args, **kwds)
+		return np.nan if ie is None else np.sum(ie)
+
	def jacobian(self, component, unit=False):
		"""Return the Jacobian for calculating a derivative.

		This function returns the derivatives dvi/dc, where vi are the natural
		components of the vector grid and c is the input component. This is used
		for a variable substitution. The result is the ingredient for the chain
		rule:
		df/dc = df/dv1 * dv1/dc + df/dv2 * dv2/dc + df/dv3 * dv3/dc.
		If the option unit is set to True, then return the derivatives with
		respect to the unit vectors, thus one obtains the derivatives dui/dc in
		∇f.unitvec(c) = df/dv1 * du1/dc + df/dv2 * du2/dc + df/dv3 * du3/dc;
		note that dui/dc and dvi/dc only differ if c is an angular coordinate,
		φ (phi) or θ (theta).

		Notes:
		The angular coordinates φ (phi) and θ (theta) are converted to radians.
		Arrays will contain NaN values in singular points.
		Pattern used throughout: np.divide(..., where = cond) skips the
		singular entries, which are then overwritten explicitly with NaN.

		Arguments:
		component   String. The input component c.
		unit        True or False. If False, return the derivatives dvi/dc as
		            is. If True, scale the values, i.e., return dui/dc; this
		            option affects the φ (phi) and θ (theta) derivatives only.

		Returns:
		dv1_dc   Float or numpy array. Either a numerical value (constant) or a
		         d-dimensional array, where d is the dimensionality of the
		         vector grid.
		dv2_dc   Float or numpy array. Only if d >= 2.
		dv3_dc   Float or numpy array. Only if d == 3.
		"""
		nan = float('nan')
		# Normalize the component name: strip the grid's prefix (e.g. 'k' in
		# 'kphi') so that only the bare coordinate label remains.
		if component == self.prefix or component == '':
			component = 'r'
		elif component.startswith(self.prefix):
			component = component[len(self.prefix):]
		if component not in ['r', 'x', 'y', 'z', 'phi', 'theta']:
			raise ValueError("Argument component must resolve to 'r', 'x', 'y', 'z', 'phi', or 'theta'.")

		if self.vtype in ['x', 'y', 'z']:
			if component == self.vtype:
				return (1.0,)
			elif component == 'r':
				# dx/dr = sgn(r) where r = |x|
				xyz = self.get_grid(self.vtype)
				# NOTE(review): np.sign with where= and no out= leaves entries
				# with xyz < 1e-6 uninitialized, unlike the explicit NaN
				# masking used elsewhere in this method — confirm intended.
				return (np.sign(xyz, where = (xyz >= 1e-6)),)
			else:
				return (nan,)
		elif self.vtype == 'xy':
			x, y = [np.squeeze(a) for a in self.get_grid()]
			if component == 'x':
				return 1.0, 0.0
			elif component == 'y':
				return 0.0, 1.0
			elif component == 'r':
				# dx/dr = x / r, dy/dr = y / r
				r = np.sqrt(x**2 + y**2)
				dxdr = np.divide(x, r, where = (r >= 1e-6))
				dydr = np.divide(y, r, where = (r >= 1e-6))
				# Origin (r ≈ 0) is singular: mark it with NaN.
				dxdr[r < 1e-6] = nan
				dydr[r < 1e-6] = nan
				return dxdr, dydr
			elif component == 'phi':
				if unit:
					r = np.sqrt(x**2 + y**2)
					dxdphi = np.divide(-y, r, where = (r >= 1e-6))
					dydphi = np.divide(x, r, where = (r >= 1e-6))
					dxdphi[r < 1e-6] = nan
					dydphi[r < 1e-6] = nan
					return dxdphi, dydphi
				else:
					return -y, x  # dx/dφ = -y, dy/dφ = x
			else:
				return nan, nan
		elif self.vtype == 'pol':
			r, phi = [np.squeeze(a) for a in self.get_grid()]
			if self.degrees:
				# NOTE(review): in-place conversion assumes get_grid() (after
				# squeeze) does not alias the grid's stored arrays — confirm.
				phi *= np.pi / 180.0
			if component == 'r':
				return 1.0, 0.0
			elif component == 'phi':
				if unit:
					dphidphi = np.divide(1, r, where = (r >= 1e-6))
					dphidphi[r < 1e-6] = nan
					return 0.0, dphidphi
				else:
					return 0.0, 1.0
			elif component == 'x':
				# dr/dx = cos(φ), dφ/dx = -sin(φ) / r
				drdx = np.cos(phi)
				dphidx = np.divide(-np.sin(phi), r, where = (r >= 1e-6))
				drdx[r < 1e-6] = nan
				dphidx[r < 1e-6] = nan
				return drdx, dphidx
			elif component == 'y':
				# dr/dy = sin(φ), dφ/dy = cos(φ) / r
				drdy = np.sin(phi)
				dphidy = np.divide(np.cos(phi), r, where = (r >= 1e-6))
				drdy[r < 1e-6] = nan
				dphidy[r < 1e-6] = nan
				return drdy, dphidy
			else:
				return nan, nan
		elif self.vtype == 'xyz':
			x, y, z = [np.squeeze(a) for a in self.get_grid()]
			if component == 'x':
				return 1.0, 0.0, 0.0
			elif component == 'y':
				return 0.0, 1.0, 0.0
			elif component == 'z':
				return 0.0, 0.0, 1.0
			elif component == 'r':
				# dx/dr = x / r, dy/dr = y / r, dz / dr = z / r
				r = np.sqrt(x**2 + y**2 + z**2)
				dxdr = np.divide(x, r, where = (r >= 1e-6))
				dydr = np.divide(y, r, where = (r >= 1e-6))
				dzdr = np.divide(z, r, where = (r >= 1e-6))
				dxdr[r < 1e-6] = nan
				dydr[r < 1e-6] = nan
				dzdr[r < 1e-6] = nan
				return dxdr, dydr, dzdr
			elif component == 'theta':
				# dx/dθ = xz / R, dy/dθ = yz / R, dz / dθ = -R with R = sqrt(x^2 + y^2)
				R = np.sqrt(x**2 + y**2)
				if unit:
					# ∇f.unitvec(θ) = (1/r) df/dθ with r = sqrt(x^2 + y^2 + z^2)
					r = np.sqrt(x**2 + y**2 + z**2)
					dxdtheta = np.divide(x * z, R * r, where = (R >= 1e-6))
					dydtheta = np.divide(y * z, R * r, where = (R >= 1e-6))
					dzdtheta = np.divide(-R, r, where = (R >= 1e-6))
				else:
					dxdtheta = np.divide(x * z, R, where = (R >= 1e-6))
					dydtheta = np.divide(y * z, R, where = (R >= 1e-6))
					dzdtheta = -R
				# The z axis (R ≈ 0) is singular for the θ derivative.
				dxdtheta[R < 1e-6] = nan
				dydtheta[R < 1e-6] = nan
				dzdtheta[R < 1e-6] = nan
				return dxdtheta, dydtheta, dzdtheta
			elif component == 'phi':
				if unit:
					r = np.sqrt(x**2 + y**2)
					dxdphi = np.divide(-y, r, where = (r >= 1e-6))
					dydphi = np.divide(x, r, where = (r >= 1e-6))
					dxdphi[r < 1e-6] = nan
					dydphi[r < 1e-6] = nan
					return dxdphi, dydphi, 0.0
				else:
					return -y, x, 0.0  # dx/dφ = -y, dy/dφ = x, dz/dφ = 0
			else:
				return nan, nan, nan
		elif self.vtype == 'cyl':
			r, phi, z = [np.squeeze(a) for a in self.get_grid()]
			if self.degrees:
				# NOTE(review): in-place conversion; see remark at 'pol' above.
				phi *= np.pi / 180.0
			if component == 'r':
				return 1.0, 0.0, 0.0
			elif component == 'phi':
				if unit:
					# ∇f.unitvec(φ) = (1/r) df/dφ
					dphidphi = np.divide(1, r, where = (r >= 1e-6))
					dphidphi[r < 1e-6] = nan
					return 0.0, dphidphi, 0.0
				else:
					return 0.0, 1.0, 0.0
			elif component == 'x':
				# dr/dx = cos(φ), dφ/dx = -sin(φ) / r, dz/dx = 0
				drdx = np.cos(phi)
				dphidx = np.divide(-np.sin(phi), r, where = (r >= 1e-6))
				drdx[r < 1e-6] = nan
				dphidx[r < 1e-6] = nan
				return drdx, dphidx, 0.0
			elif component == 'y':
				# dr/dy = sin(φ), dφ/dy = cos(φ) / r, dz/dy = 0
				drdy = np.sin(phi)
				dphidy = np.divide(np.cos(phi), r, where = (r >= 1e-6))
				drdy[r < 1e-6] = nan
				dphidy[r < 1e-6] = nan
				return drdy, dphidy, 0.0
			elif component == 'z':
				return 0.0, 0.0, 1.0
			elif component == 'theta':
				# dr/dθ = z, dφ/dθ = 0, dz/dθ = -r with r = sqrt(x^2 + y^2)
				if unit:
					# ∇f.unitvec(θ) = (1/R) df/dθ
					# with R = sqrt(r^2 + z^2) = sqrt(x^2 + y^2 + z^2)
					rr = np.sqrt(r**2 + z**2)
					drdtheta = np.divide(z, rr, where = (rr >= 1e-6))
					dzdtheta = np.divide(-r, rr, where = (rr >= 1e-6))
					drdtheta[rr < 1e-6] = nan
					dzdtheta[rr < 1e-6] = nan
					return drdtheta, 0.0, dzdtheta
				else:
					return z, 0.0, -r
			else:
				return nan, nan, nan
		elif self.vtype == 'sph':
			r, theta, phi = [np.squeeze(a) for a in self.get_grid()]
			if self.degrees:
				# NOTE(review): in-place conversion; see remark at 'pol' above.
				theta *= np.pi / 180.0
				phi *= np.pi / 180.0
			if component == 'r':
				return 1.0, 0.0, 0.0
			elif component == 'theta':
				if unit:
					# ∇f.unitvec(θ) = (1/r) df/dθ
					dthetadtheta = np.divide(1, r, where = (r >= 1e-6))
					dthetadtheta[r < 1e-6] = nan
					return 0.0, dthetadtheta, 0.0
				else:
					return 0.0, 1.0, 0.0
			elif component == 'phi':
				if unit:
					# ∇f.unitvec(φ) = (1/R) df/dφ with R = r sin θ = sqrt(x^2 + y^2)
					R = r * np.sin(theta)
					dphidphi = np.divide(1, R, where = (R >= 1e-6))
					dphidphi[R < 1e-6] = nan
					return 0.0, 0.0, dphidphi
				else:
					return 0.0, 0.0, 1.0
			elif component == 'x':
				# dr/dx = x / r = sin θ cos φ
				# dθ/dx = xz / (r^2 R) = (1/r) cos θ cos φ
				# dφ/dx = -y / R^2 = -sin φ / r sin θ
				R = r * np.sin(theta)  # R = r sin θ
				drdx = np.sin(theta) * np.cos(phi)
				dthetadx = np.divide(np.cos(theta) * np.cos(phi), r, where = (r >= 1e-6))
				dphidx = np.divide(-np.sin(phi), R, where = (R >= 1e-6))
				drdx[r < 1e-6] = nan
				dthetadx[r < 1e-6] = nan
				dphidx[r < 1e-6] = nan
				return drdx, dthetadx, dphidx
			elif component == 'y':
				# dr/dy = y / r = sin θ sin φ
				# dθ/dy = yz / (r^2 R) = (1/r) cos θ sin φ
				# dφ/dy = x / R^2 = cos φ / r sin θ
				R = r * np.sin(theta)  # R = r sin θ
				drdy = np.sin(theta) * np.sin(phi)
				dthetady = np.divide(np.cos(theta) * np.sin(phi), r, where = (r >= 1e-6))
				dphidy = np.divide(np.cos(phi), R, where = (R >= 1e-6))
				drdy[r < 1e-6] = nan
				dthetady[r < 1e-6] = nan
				dphidy[r < 1e-6] = nan
				return drdy, dthetady, dphidy
			elif component == 'z':
				# dr/dz = cos θ, dθ/dz = -sin θ / r, dφ/dz = 0
				drdz = np.cos(theta)
				dthetadz = np.divide(-np.sin(theta), r, where = (r >= 1e-6))
				drdz[r < 1e-6] = nan
				dthetadz[r < 1e-6] = nan
				return drdz, dthetadz, 0.0
			else:
				return nan, nan, nan
		else:
			raise ValueError("Invalid value for self.vtype")
+
+	def gradient_length_coeff(self):
+		"""Return the Jacobian factors for calculating the length of the gradient.
+
+		This function returns the coefficients ai, such that
+		|∇f|^2 = a1 (df/dv1)^2 + a2 (df/dv2)^2 + a3 (df/dv3)^2
+		where vi are the natural components of the vector grid. This result is
+		equivalent to squaring the result of the function VectorGrid.jacobian()
+		using the natural components of the vector grid and with unit=True.
+
+		Notes:
+		The derivatives in angular coordinates φ (phi) and θ (theta) in the
+		above expression should be in radians for the result to be correct.
+		Arrays will contain NaN values in singular points.
+
+		Returns:
+		a1       Float or numpy array. Either a numerical value (constant) or a
+		         d-dimensional array, where d is the dimensionality of the
+		         vector grid.
+		a2       Float or numpy array. Only if d >= 2.
+		a3       Float or numpy array. Only if d == 3.
+		"""
+		nan = float('nan')
+		if self.vtype in ['x', 'y', 'z']:
+			return (1.0,)
+		elif self.vtype == 'xy':
+			return 1.0, 1.0
+		elif self.vtype == 'pol':
+			# |∇f|^2 = (df/dr)^2 + (1/r^2) (df/dφ)^2
+			r, _ = [np.squeeze(a) for a in self.get_grid()]
+			a2 = np.divide(1.0, r**2, where = (r >= 1e-6))
+			a2[r < 1e-6] = nan
+			return 1.0, a2
+		elif self.vtype == 'xyz':
+			return 1.0, 1.0, 1.0
+		elif self.vtype == 'cyl':
+			# |∇f|^2 = (df/dr)^2 + (1/r^2) (df/dφ)^2 + (df/dz)^2
+			r, _, _ = [np.squeeze(a) for a in self.get_grid()]
+			a2 = np.divide(1.0, r**2, where = (r >= 1e-6))
+			a2[r < 1e-6] = nan
+			return 1.0, a2, 1.0
+		elif self.vtype == 'sph':
+			# |∇f|^2 = (df/dr)^2 + (1/r^2) (df/dθ)^2 + (1/rsinφ)^2 (df/dφ)^2
+			r, theta, _ = [np.squeeze(a) for a in self.get_grid()]
+			if self.degrees:
+				theta *= np.pi / 180.
+			R = r * np.sin(theta)
+			a2 = np.divide(1.0, r**2, where = (r >= 1e-6))
+			a3 = np.divide(1.0, R**2, where = (R >= 1e-6))
+			a2[r < 1e-6] = nan
+			a3[r < 1e-6] = nan
+			return 1.0, a2, a3
+		else:
+			raise ValueError("Invalid value for self.vtype")
+
+	def get_derivative_components(self):
+		if self.vtype in ['xyz', 'cyl', 'sph'] and len(self.var) == 3:
+			return ['', 'r', 'x', 'y', 'z', 'theta', 'phi']
+		if len(self.var) == 2:
+			var = tuple(self.var)
+			deriv_components_2d = {
+				('x', 'y'):       ['r', 'x', 'y', 'phi'],
+				('x', 'z'):       ['r', 'x', 'z', 'theta'],
+				('y', 'z'):       ['r', 'y', 'z', 'theta'],
+				('r', 'phi'):     ['', 'r', 'x', 'y', 'phi'],
+				('r', 'z'):       ['', 'r', 'x', 'y', 'z', 'theta'],
+				('phi', 'z'):     ['x', 'y', 'z', 'theta', 'phi'],
+				('r', 'theta'):   ['', 'r', 'x', 'y', 'z', 'theta'],
+				('theta', 'phi'): ['x', 'y', 'z', 'theta', 'phi']
+			}
+			if var in deriv_components_2d:
+				return deriv_components_2d[var]
+			elif (var[1], var[0]) in deriv_components_2d:
+				return deriv_components_2d[(var[1], var[0])]
+			else:
+				raise ValueError("Invalid combination of variables")
+		if len(self.var) == 1:
+			if self.var[0] == 'r':
+				return ['', 'r']
+			else:
+				return [self.var[0]]
+		raise ValueError("Invalid combination of variables")
+
+
+	# Comparisons
+	def identical(self, other, acc = 1e-9):
+		"""Test identity of two VectorGrid instances.
+		Two VectorGrid instances are identical if they are of the same shape,
+		have the same vector type, and contain the same values in the same
+		order.
+
+		Arguments:
+		other   VectorGrid instance. The second vector grid.
+		acc     Float. The maximal difference between two values below which
+		        they are considered equal.
+		"""
+		if not isinstance(other, VectorGrid):
+			raise TypeError("Comparison must be with another VectorGrid instance")
+		if self.ndim != other.ndim:
+			return False
+		if self.var != other.var or len(self.values) != len(other.values):
+			return False
+		if self.const != other.const or len(self.constvalues) != len(other.constvalues):
+			return False
+		if self.shape != other.shape:
+			return False
+		if self.vtype != other.vtype:
+			return False
+		for v1, v2 in zip(self.values, other.values):
+			if len(v1) != len(v2):
+				return False
+			if np.amax(np.abs(v1 - v2)) > acc:
+				return False
+		for c1, c2 in zip(self.constvalues, other.constvalues):
+			if abs(c1 - c2) > acc:
+				return False
+		return True
+
+	def equal(self, other, acc = 1e-9):
+		"""Test equality of two VectorGrid instances.
+		Two VectorGrid instances are equal if they are of the same shape and
+		have the same values in the same order, but possibly with a different
+		vector type.
+
+		Arguments:
+		other   VectorGrid instance. The second vector grid.
+		acc     Float. The maximal difference between two vectors below which
+		        they are considered equal.
+		"""
+		if not isinstance(other, VectorGrid):
+			raise TypeError("Comparison must be with another VectorGrid instance")
+		if len(self) != len(other):
+			return False
+		for v1, v2 in zip(self, other):
+			if not v1.equal(v2, acc):
+				return False
+		return True
+
+	def get_subset(self, indices):
+		"""Get subgrid of VectorGrid from (numpy style) array index.
+
+		Arguments:
+		indices    Tuple of integers and slice objects. A numpy style array
+		           index.
+
+		Returns:
+		newgrid    VectorGrid instance. A new instance with the subset grid.
+		"""
+		if len(indices) > len(self.var):
+			raise IndexError(f"Too many indices for VectorGrid of shape {self.shape}")
+		newarg = []
+		for var, val, idx in zip(self.var, self.values, indices):
+			newarg.append(var)
+			newarg.append(val[idx])
+		for var, val in zip(self.const, self.constvalues):
+			newarg.append(var)
+			newarg.append(val)
+		return VectorGrid(*tuple(newarg), astype = self.vtype, deg = self.degrees, prefix = self.prefix)
+
+	def is_subset_of(self, other, acc = 1e-9):
+		"""Test whether the present VectorGrid is a subset of another VectorGrid instance.
+		The answer is True if all vectors from the present instance are
+		contained also in the other instance. The comparison is preformed by
+		identity, i.e., the vector types/components and the dimensionality must
+		be identical for the answer to be possibly True.
+
+		Arguments:
+		other   VectorGrid instance. The second vector grid.
+		acc     Float. The maximal difference between two values below which
+		        they are considered equal.
+		"""
+		if self.ndim != other.ndim:
+			return False
+		comp1 = self.get_components()
+		comp2 = other.get_components()
+		if comp1 != comp2:
+			return False
+		for co in comp1:
+			val1 = self.get_array(co)
+			val2 = other.get_array(co)
+			delta = np.abs(val1[:, np.newaxis] - val2[np.newaxis, :])
+			if np.amax(np.amin(delta, axis = 1)) > acc:
+				return False
+		return True
+
+	def is_compatible_with(self, other, acc = 1e-9):
+		"""Test whether the union of two vector grids is a vector grid.
+		Two VectorGrid instances are 'compatible' if their union again defines
+		a grid. For this to be True, the values must be the same at all axes
+		except for mostly one of them. (Think of this problem geometrically:
+		When is the union of two rectangles again a rectangle?)
+
+		Arguments:
+		other   VectorGrid instance. The second vector grid.
+		acc     Float. The maximal difference between two values below which
+		        they are considered equal.
+		"""
+		comp1 = self.get_components()
+		comp2 = other.get_components()
+		if comp1 != comp2:
+			return False
+
+		n_nonequal = 0
+		for co in comp1:
+			val1 = self.get_array(co)
+			val2 = other.get_array(co)
+			delta = np.abs(val1[:, np.newaxis] - val2[np.newaxis, :])
+			subset = (np.amax(np.amin(delta, axis = 1)) <= acc)
+			superset = (np.amax(np.amin(delta, axis = 0)) <= acc)
+			if not subset and not superset:
+				n_nonequal += 1
+		return n_nonequal <= 1  # the number of nonequal axes must be either zero or one
+
+	def is_sorted(self, increasing = False, strict = True):
+		"""Test whether the values are sorted.
+
+		Arguments:
+		increasing   True or False. If True, accept sorted values in ascending
+		             (increasing) order only. If False, also accept reverse
+		             (descending/decreasing) order also.
+		strict       True or False. If True, the values must be strictly
+		             monotonic for the function to return True. If False, also
+		             accept equal subsequent values.
+
+		Returns:
+		True or False.
+		"""
+		if increasing:
+			if strict:
+				result = [np.all(np.diff(val) > 0) for val in self.values]
+			else:
+				result = [np.all(np.diff(val) >= 0) for val in self.values]
+		else:
+			if strict:
+				result = [np.all(np.diff(val) > 0) or np.all(np.diff(val) < 0) for val in self.values]
+			else:
+				result = [np.all(np.diff(val) >= 0) or np.all(np.diff(val) <= 0) for val in self.values]
+		return all(result)
+
+	def zero(self):
+		"""Test whether all vectors in the grid are zero."""
+		return all([v.zero() for v in self])
+
+	def is_vertical(self):
+		"""Test whether VectorGrid has vertical (z) components only.
+		The negation is useful to check for in-plane components of magnetic fields
+		"""
+		zaxis = Vector(1.0, astype = 'z')
+		return all([v.parallel(zaxis) for v in self])
+
+	def is_inplane(self):
+		"""Test whether VectorGrid has in-plane (x, y) components only.
+		The negation is useful to check for out-of-plane components of magnetic fields
+		"""
+		zaxis = Vector(1.0, astype = 'z')
+		return all([v.perpendicular(zaxis) for v in self])
+
+	def sort(self, in_place = False, flat_indices = False, expand_indices = False):
+		"""Sort by value and provide sorting indices (like argsort).
+
+		Arguments:
+		in_place        True or False. If True, return the present VectorGrid
+		                instance. If False, return a new instance.
+		flat_indices    True or False. See comments for return value.
+		expand_indices  True or False. See comments for return value.
+
+		Returns:
+		grid_new   The present VectorGrid instance or a new one.
+		indices    Sort indices, comparable to the result of an 'argsort'. If
+		           flat_indices and expand_indices are both False, return the
+		           separate sort orders for the variable arrays. If flat_indices
+		           is True, return the sort order of the flattened array. If
+		           expand_indices is True, return a multi-dimensional array with
+		           multi-indices. (The resulting array has dimension ndim + 1.)
+		           flat_indices and expand_indices cannot be True
+		           simultaneously.
+		"""
+		order = [np.argsort(val) for val in self.values]
+		newval = [np.sort(val) for val in self.values]
+		if flat_indices and expand_indices:
+			raise ValueError("Arguments flat_indices and expand_indices cannot both be True.")
+		elif flat_indices:
+			grid_order = np.meshgrid(*order, indexing = 'ij')
+			indices = np.ravel_multi_index([go.flatten() for go in grid_order], self.shape)
+		elif expand_indices:
+			grid_order = np.meshgrid(*order, indexing = 'ij')
+			indices = np.stack(grid_order, axis = -1)
+		else:
+			indices = order
+		if in_place:
+			self.values = newval
+			return self, indices
+		else:
+			newarg = []
+			for var, val in zip(self.var, newval):
+				newarg.append(var)
+				newarg.append(val)
+			for const, constval in zip(self.const, self.constvalues):
+				newarg.append(const)
+				newarg.append(constval)
+			return VectorGrid(*tuple(newarg), astype = self.vtype, deg = self.degrees, prefix = self.prefix), indices
+
+	def extend(self, other, acc = 1e-9):
+		"""Extend the present VectorGrid instance with another one
+
+		Arguments:
+		other   VectorGrid instance. The second vector grid.
+		acc     Float. The maximal difference between two values below which
+		        they are considered equal.
+
+		Returns:
+		A new VectorGrid instance.
+		"""
+		if not self.is_compatible_with(other, acc):
+			raise ValueError("Two VectorGrid instances are not compatible")
+
+		comp = self.get_components()
+		newarg = []
+		for co in comp:
+			val1 = self.get_array(co)
+			val2 = other.get_array(co)
+			delta = np.abs(val1[:, np.newaxis] - val2[np.newaxis, :])
+			subset = (np.amax(np.amin(delta, axis = 1)) <= acc)
+			superset = (np.amax(np.amin(delta, axis = 0)) <= acc)
+			if not subset and not superset:
+				newval = np.concatenate((val1, val2[np.amin(delta, axis = 0) > acc]))
+			elif subset and not superset:
+				newval = np.concatenate((val1, val2[np.amin(delta, axis = 0) > acc]))
+			elif not subset and superset:
+				newval = np.concatenate((val1[np.amin(delta, axis = 1) > acc], val2))
+			else:
+				newval = val1
+			newarg.append(co)
+			newarg.append(newval)
+		return VectorGrid(*tuple(newarg), astype = self.vtype, deg = self.degrees, prefix = self.prefix)
+
+	def to_dict(self):
+		"""Return a dict related to the VectorGrid"""
+		grid_dict = {}
+		pf = '' if self.prefix is None else self.prefix
+		for var, val in zip(self.var, self.values):
+			fullvar = pf if (pf and var == 'r') else ('%s_%s' % (pf, var))
+			grid_dict[fullvar + '_min'] = np.amin(val)
+			grid_dict[fullvar + '_max'] = np.amax(val)
+			grid_dict[fullvar + '_n'] = len(val)
+		for const, val in zip(self.const, self.constvalues):
+			fullconst = pf if (pf and const == 'r') else ('%s_%s' % (pf, const))
+			grid_dict[fullconst] = val
+		if len(self.shape) == 0:
+			grid_dict[pf + '_shape'] = '1'
+		else:
+			times = '\u00d7'  # multiplication sign
+			grid_dict[pf + '_shape'] = times.join(['%i' % x for x in self.shape])
+		return grid_dict
+
def vectorgrid_from_components(val, var, constval, const, **kwds):
	"""Return a VectorGrid instance from VectorGrid.get_var_const() output.
	This 'wrapper' puts the arguments to the VectorGrid initializer in the
	correct order.

	Arguments:
	val       Number, list/array or tuple thereof. The values of the variables
	          in the vector grid.
	var       String or tuple of strings. The labels (vector components) of the
	          variables.
	constval  Number or tuple of numbers. The values for the constants of the
	          vector grid.
	const     String or tuple of strings. The labels (vector components) of the
	          constants.
	**kwds    Keyword arguments passed to VectorGrid initializer.

	Note:
	The pairs {val, var}, and {constval, const} must be tuples of equal length.
	A 1-tuple can be replaced by a single value. A 0-tuple can be replaced by
	None.

	Returns:
	grid      A VectorGrid instance.
	"""
	if val is None:
		val = ()
	elif isinstance(val, tuple):
		pass
	elif isinstance(val, (float, int, np.floating, np.integer, list, np.ndarray)):
		# np.ndarray is the array type; np.array (a function) was a bug here
		# that made isinstance() raise TypeError for any input.
		val = (val,)
	else:
		raise TypeError("Argument val must be tuple, numeric, list, or array")
	if var is None:
		var = ()
	elif isinstance(var, str):
		var = (var,)
	elif isinstance(var, tuple) and all([isinstance(v, str) for v in var]):
		pass
	else:
		raise TypeError("Argument var must be str or tuple of str")
	if len(var) != len(val):
		raise ValueError("Arguments val and var must be of equal length")
	vgargs = []
	for var1, val1 in zip(var, val):
		vgargs.append(var1)
		vgargs.append(val1)
	if const is None and constval is None:
		pass
	elif isinstance(const, str) and isinstance(constval, (float, int, np.floating, np.integer)):
		vgargs.append(const)
		vgargs.append(constval)
	elif isinstance(const, tuple) and isinstance(constval, tuple):
		if len(const) != len(constval):
			raise ValueError("Arguments constval and const must be of equal length")
		for c, cval in zip(const, constval):
			vgargs.append(c)
			vgargs.append(cval)
	else:
		raise TypeError("Invalid combination of types for arguments constval and const")
	return VectorGrid(*vgargs, **kwds)
+
+
class ZippedKB:
	"""Container class for combination of two VectorGrids, for momentum and magnetic field.

	Attributes:
	k   VectorGrid instance, list, Vector instance, number, or None. Momentum
	    values.
	b   VectorGrid instance, list, Vector instance, number, or None. Magnetic
	    field values.

	Note:
	Either k or b may be of length > 1 (VectorGrid or list with more than one
	element), but not both.
	"""
	# Types that count as a single constant value. Plain and numpy integers
	# are accepted as a generalization; previously an int input would raise
	# TypeError on len().
	_scalar_types = (int, float, np.integer, np.floating, Vector)

	def __init__(self, k, b):
		lk = 1 if k is None or isinstance(k, self._scalar_types) else len(k)
		lb = 1 if b is None or isinstance(b, self._scalar_types) else len(b)
		if lk > 1 and lb > 1:
			raise ValueError("At least one component must be a constant")
		# None defaults to a zero vector; scalars are wrapped in 1-element lists
		self.k = [Vector(0.0, astype = 'x')] if k is None else [k] if isinstance(k, self._scalar_types) else k
		self.b = [Vector(0.0, astype = 'z')] if b is None else [b] if isinstance(b, self._scalar_types) else b

	def __len__(self):
		"""Get length (number of elements in either k or b)"""
		return max(len(self.k), len(self.b))

	def shape(self):
		"""Get shape of either k or b, whichever is not constant"""
		if len(self.k) > 1:
			return (len(self.k),) if isinstance(self.k, list) else self.k.shape
		elif len(self.b) > 1:
			return (len(self.b),) if isinstance(self.b, list) else self.b.shape
		else:
			return (1,)

	def __iter__(self):
		"""Iterator over flat array.

		Yields:
		Tuple of two Vector instances (or float, if appropriate)
		"""
		if len(self.k) > 1 and len(self.b) == 1:
			for k in self.k:
				yield (k, self.b[0])
		elif len(self.k) == 1 and len(self.b) > 1:
			for b in self.b:
				yield (self.k[0], b)
		elif len(self.k) == 1 and len(self.b) == 1:
			yield (self.k[0], self.b[0])

	def __getitem__(self, idx):
		"""Get element.

		Returns:
		Tuple of two Vector instances (or float, if appropriate)
		"""
		if not isinstance(idx, (int, np.integer)):
			raise TypeError("Index must be an integer")
		if len(self.k) > 1 and len(self.b) == 1:
			return (self.k[idx], self.b[0])
		elif len(self.k) == 1 and len(self.b) > 1:
			return (self.k[0], self.b[idx])
		elif len(self.k) == 1 and len(self.b) == 1 and idx == 0:
			return (self.k[0], self.b[0])
		else:
			raise ValueError("Illegal index value")

	def dependence(self):
		"""Return 'k' or 'b', whichever is not constant; '' if both are constant."""
		if len(self.k) > 1:
			return "k"
		elif len(self.b) > 1:
			return "b"
		else:
			return ""

	def get_grid(self):
		"""Get the grid of k or b, whichever is not constant; None if neither is a VectorGrid."""
		if len(self.k) > 1 and isinstance(self.k, VectorGrid):
			return self.k
		elif len(self.b) > 1 and isinstance(self.b, VectorGrid):
			return self.b
		else:
			return None

	def to_dict(self):
		"""Return a dict related to the VectorGrid instances or values k and b"""
		grid_dict = {}
		if isinstance(self.k, VectorGrid):
			grid_dict.update(self.k.to_dict())
		elif len(self.k) == 1:
			if isinstance(self.k[0], Vector):
				grid_dict.update(self.k[0].to_dict(prefix = 'k'))
			elif isinstance(self.k[0], (int, float, np.integer, np.floating)):
				grid_dict['k'] = self.k[0]
		if isinstance(self.b, VectorGrid):
			grid_dict.update(self.b.to_dict())
		elif len(self.b) == 1:
			if isinstance(self.b[0], Vector):
				grid_dict.update(self.b[0].to_dict(prefix = 'b'))
			elif isinstance(self.b[0], (int, float, np.integer, np.floating)):
				grid_dict['b'] = self.b[0]
		return grid_dict
+
+
def get_momenta_from_locations(all_kval1, locations, exact_match = None):
	"""Get momenta from location labels.

	Arguments:
	all_kval1    ZippedKB instance, VectorGrid instance, or list/array.
	             Contains a grid of all momentum values.
	locations    List/array of strings or numbers.
	exact_match  True, False or None. If True, momentum values must match the
	             location exactly; if there is not an exact match, 'skip' the
	             location ('old' behaviour). If False, find the nearest match
	             for all locations. If None, extract it from configuration.

	Returns:
	A sorted list (for list/array input) or a VectorGrid instance with the
	momenta that correspond to a valid location label or value.
	"""
	if exact_match is None:
		exact_match = get_config_bool('wf_locations_exact_match')
	# TODO: If locations is a VectorGrid instance, we get an error. What is this supposed to do?
	if isinstance(all_kval1, ZippedKB):
		all_kval = all_kval1.b if all_kval1.dependence() == 'b' else all_kval1.k
	else:
		all_kval = all_kval1
	if isinstance(all_kval, (list, np.ndarray)):
		out_kval = []
		l = len(all_kval)
		# Largest spacing between subsequent values; used as the tolerance
		# for non-exact matching below.
		k_maxstep = 0.0 if l == 1 else np.max(np.abs(np.diff(np.sort(all_kval))))
		for loc in locations:
			if isinstance(loc, (int, float, np.integer, np.floating)):
				if not exact_match:
					diffs = np.abs(np.abs(all_kval) - loc)
					idx = np.argmin(diffs)
					this_diff = diffs[idx]
					## Accept non-exact match only if not too far away from
					## values in all_kval. The maximal acceptable distance is
					## the largest difference between two values in all_kval.
					if this_diff < k_maxstep + 1e-6:
						loc = all_kval[idx]
					else:
						sys.stderr.write("ERROR (get_momenta_from_locations): Location '%s' does not match momentum value; (too far) out of range.\n" % loc)
						continue
				for k in all_kval:
					if abs(abs(k) - loc) < 1e-6:
						out_kval.append(k)
			elif loc == 'zero':
				for k in all_kval:
					if abs(k) < 1e-6:
						out_kval.append(k)
			elif loc == 'min':
				out_kval.append(all_kval[0])
			elif loc == 'max':
				out_kval.append(all_kval[-1])
			elif loc == 'all':
				out_kval.extend(all_kval)
			else:
				if loc == 'mid':
					loc = '1/2'
				try:
					frac = [int(i) for i in loc.split('/')]
					# Commensurability: the index (l - 1) * p / q must be an
					# integer, i.e., l - 1 must be divisible by q. The former
					# test 'l % q == 1' is equivalent for q >= 2 but wrongly
					# rejected every list for q = 1 (e.g. location '1/1').
					if not exact_match or (l - 1) % frac[1] == 0:
						out_kval.append(all_kval[(l - 1) * frac[0] // frac[1]])
					else:
						sys.stderr.write("ERROR (get_momenta_from_locations): Momentum list not commensurate with point '%s'.\n" % loc)
				except (ValueError, IndexError, ZeroDivisionError, AttributeError):
					# ValueError: non-integer parts; IndexError: no '/' in loc;
					# ZeroDivisionError: q = 0; AttributeError: loc not a str.
					sys.stderr.write("ERROR (get_momenta_from_locations): Invalid location '%s'.\n" % loc)
		return sorted(list(set(out_kval)))
	elif isinstance(all_kval, VectorGrid):
		vg_arg = []
		for co in all_kval.get_components():
			val = all_kval.get_array(co)
			if len(val) == 1:
				compval = val
			else:
				# Pass exact_match on, so that the recursion honours the
				# caller's explicit choice instead of re-reading the config.
				compval = get_momenta_from_locations(val, locations, exact_match = exact_match)
			vg_arg.append(co)
			vg_arg.append(compval)
		return VectorGrid(*vg_arg, astype = all_kval.vtype, deg = all_kval.degrees, prefix = all_kval.prefix)
	else:
		raise TypeError("Input must be a list/array or a VectorGrid instance.")
+
def locations_index(locations, vec, vec_numeric = None):
	"""Find a value in locations list matching vector vec, and return its index.

	Arguments:
	locations     List, array, or VectorGrid. Contains the vectors or values
	              used for matching against. For this argument, the return value
	              of get_momenta_from_locations() can be used.
	vec           Vector or number. The vector which is matched against the
	              vectors or values in argument locations.
	vec_numeric   Number or None. If a number, use this value if using the
	              numerical match fallback.

	Returns:
	match   Integer or None. Index of the matching value in locations if any
	        value in locations matches vec, None if none matches. If both inputs
	        are Vectors of the same type, then check for identity. Otherwise, if
	        locations contains Vectors, check equality. Otherwise, check
	        equality of numerical value.
	"""
	if vec_numeric is None:
		vec_numeric = vec.len() if isinstance(vec, Vector) else vec
	for idx, loc in enumerate(locations):
		loc_is_vector = isinstance(loc, Vector)
		if loc_is_vector and isinstance(vec, Vector) and loc.vtype == vec.vtype:
			# Same vector type: strongest test (identity)
			if loc.identical(vec):
				return idx
		elif loc_is_vector:
			# Different vector type: test equality instead
			if loc.equal(vec):
				return idx
		elif isinstance(loc, (int, float, np.integer, np.floating)):
			# Numerical fallback
			if np.abs(loc - vec_numeric) < 1e-9:
				return idx
	return None
diff --git a/kdotpy-v1.0.0/src/kdotpy/observables.py b/kdotpy-v1.0.0/src/kdotpy/observables.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba7bee33856972b12a5636e5bbc2d920e320984d
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/observables.py
@@ -0,0 +1,1615 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import re
+from scipy.sparse import dia_matrix, csc_matrix, coo_matrix, issparse
+from .physparams import PhysParams
+from .hamiltonian import parity_x, parity_y, parity_z, hexchange, hzeeman, hstrain, hz_block_diag
+from .phystext import format_unit
+from . import spinmat
+
+### HELPER FUNCTION ###
indexed_obs_regex = r"([A-Za-z0-9_]+)\[([+-]?[0-9]+)\]$"  # used below several times
def get_index_from_obs_string(s):
	"""Extract the integer index from an observable id like 'obs[3]'.

	Arguments:
	s   String. Observable id, possibly with an index suffix '[n]'.

	Returns:
	Integer index, or None if s is not an indexed observable id.
	"""
	match = re.match(indexed_obs_regex, s)
	if match is None:
		return None
	return int(match.group(2))
+
def obsid_to_tex(obsid, dimful = None):
	"""Get quantity and unit string in TeX style from observable id

	Arguments:
	obsid   String
	dimful  True, False, or None. Whether to get the quantity and unit strings
	        for dimensionful observables. If None, take the value from
	        all_observables.

	Returns:
	qstr    String. TeX formatted string for physical quantity.
	ustr    String. TeX formatted string for unit.
	"""
	if dimful is None:
		dimful = all_observables.dimful is True
	if obsid not in all_observables:
		sys.stderr.write("Warning (obsid_to_tex): Observable '%s' not defined.\n" % obsid)
		return None, None
	obs = all_observables[obsid]
	qstr = obs.to_str(style = 'tex', dimful = dimful)
	ustr = obs.get_unit_str(style = 'tex', dimful = dimful)
	# Substitute the index into an indexed quantity string, e.g. 'j_{%i}'
	if '%i' in qstr:
		idx = get_index_from_obs_string(obsid)
		if idx is None:
			sys.stderr.write("ERROR (obsid_to_tex): No index value for indexed observable.\n")
			qstr = qstr.replace('%i', '?')
		else:
			qstr = qstr % idx
	return (qstr, ustr)
+
+### MATRIX TOOLS ###
def blockdiag(mat, nblocks, offset = 0):
	"""Construct a sparse block matrix in COO format
	This function is faster than scipy.sparse.block_diag() for larger matrices
	It is also more restricted though, as all blocks are identical

	Arguments:
	mat      Numpy array of two dimensions, or scipy sparse matrix. The matrix
	         that constitutes one block.
	nblocks  Integer. The number of blocks.
	offset   Integer. If nonzero, the blocks will be placed off-diagonally; +1
	         means one position below the diagonal, -1 one position above; the
	         absolute value must be smaller than nblocks.

	Note:
	For larger input matrices (argument 'mat'), it is advisable to use a sparse
	format for better performance.

	Returns:
	A sparse matrix of type scipy.sparse.coo_matrix.
	"""
	cols = []
	rows = []
	data = []
	nx, ny = mat.shape
	if not isinstance(offset, (int, np.integer)):
		raise TypeError("Argument offset must be an integer")
	if abs(offset) >= nblocks:
		raise ValueError("Absolute value of argument offset must be smaller than nblocks")
	# Top-left coordinates of each placed block; a nonzero offset shifts the
	# blocks below (offset > 0) or above (offset < 0) the main diagonal and
	# reduces the number of placed blocks to nblocks - |offset|.
	if offset > 0:
		rowidx = np.arange(offset, nblocks) * nx
		colidx = np.arange(0, nblocks - offset) * ny
	elif offset < 0:
		rowidx = np.arange(0, nblocks + offset) * nx
		colidx = np.arange(-offset, nblocks) * ny
	else:
		rowidx = np.arange(0, nblocks) * nx
		colidx = np.arange(0, nblocks) * ny
	ndata = len(rowidx)  # number of blocks actually placed
	if issparse(mat):
		coomat = mat.tocoo()
		for i, j, v in zip(coomat.row, coomat.col, coomat.data):
			rows.append(i + rowidx)
			cols.append(j + colidx)
			data.append(np.full(ndata, v))
	else:
		# Iterate over nonzero entries only. Note np.full(ndata, ...), not
		# np.full(nblocks, ...): with a nonzero offset fewer than nblocks
		# blocks are placed, and the former length mismatch made the
		# coo_matrix constructor fail.
		for i, j in zip(*np.nonzero(mat)):
			rows.append(i + rowidx)
			cols.append(j + colidx)
			data.append(np.full(ndata, mat[i, j]))
	if len(data) == 0:
		# All-zero input: return an empty sparse matrix of the full size
		return coo_matrix((nx * nblocks, ny * nblocks), dtype = mat.dtype)
	rows = np.concatenate(rows)
	cols = np.concatenate(cols)
	data = np.concatenate(data)
	return coo_matrix((data, (rows, cols)), shape = (nx * nblocks, ny * nblocks))
+
+### OBSERVABLES CLASS ###
+class Observable:
+	"""Observable object.
+
+	Attributes:
+	obsid          String. The observable id.
+	obsfun         Function (callable object) or None. None is appropriate for
+	               observables that are calculated elsewhere (i.e., not a
+	               function in observables.py).
+	obsfun_type    String, one of 'none', 'mat', 'params', 'params_magn',
+	               'eivec', 'kwds', and 'overlap'. This determines which
+	               arguments will be passed to obsfun and how the eigenvectors
+	               are applied.
+	unit_dimless   String or None. Unit of the dimensionless variety
+	               (unformatted).
+	unit_dimful    String or None. Unit of the dimensionful variety
+	               (unformatted).
+	dimful_qty     String or None. What quantity determines the scaling factor
+	               for conversion between dimensionful and dimensionless
+	               observable.
+	dimful_factor  Float or None. Scaling factor for conversion between
+	               dimensionful and dimensionless observable.
+	obsid_alias    String or list of strings. Alias(es) for the observable id.
+	str_dimless    Dict instance, whose keys are formatting styles and whose
+	               values are the string representations of the dimensionless
+	               observable in these styles.
+	str_dimful     Dict instance, whose keys are formatting styles and whose
+	               values are the string representations of the dimensionful
+	               observable in these styles.
+	minmax         List of 2 floats or None. If set, this determines the range of
+	               of the colour legends in the plots.
+	colordata      String or None. If set, which colormap should be used for this
+	               observable.
+	"""
	def __init__(self, obsid, obsfun, obsfun_type = None, unit_dimless = None, unit_dimful = None, dimful_qty = None, obsid_alias = None, str_dimless = None, str_dimful = None, minmax = None, colordata = None):
		"""Initialize and validate an Observable.

		Arguments (see class docstring for full semantics):
		obsid         String. The observable id. Required.
		obsfun        Callable or None. Function that evaluates the observable;
		              None for observables calculated elsewhere.
		obsfun_type   String or None. How obsfun is invoked. If None, inferred
		              from obsfun: 'none' if obsfun is None, else 'mat'.
		unit_dimless  String or None. Unit of the dimensionless variety.
		unit_dimful   String or None. Unit of the dimensionful variety.
		dimful_qty    String or None. Quantity that determines the scaling
		              factor between dimensionless and dimensionful variety.
		obsid_alias   String, list of strings, or None. Alias(es) for obsid.
		str_dimless   Dict or None. Style -> string representations
		              (dimensionless).
		str_dimful    Dict or None. Style -> string representations
		              (dimensionful).
		minmax        Number, list of two numbers, or None. Colour legend
		              range; a single number x means [-|x|, |x|].
		colordata     String or None. Colormap id; default 'symmobs'.

		Raises:
		TypeError or ValueError if an argument has an invalid type or value.
		"""
		if not isinstance(obsid, str):
			raise TypeError("Argument obsid must be a string instance")
		self.obsid = obsid
		self.obsfun = obsfun  # TODO: test
		# obsfun_type: infer from obsfun if not given, else validate
		if obsfun_type is None:
			self.obsfun_type = "none" if obsfun is None else "mat"
		elif obsfun_type in ['none', 'mat', 'mat_indexed', 'params', 'params_indexed', 'params_magn', 'eivec', 'kwds', 'overlap']:
			self.obsfun_type = obsfun_type
		else:
			raise ValueError("Invalid value for argument 'obsfun_type'.")
		if isinstance(unit_dimless, str) or unit_dimless is None:
			self.unit_dimless = unit_dimless
		else:
			raise TypeError("Argument unit_dimless must be a string instance or None")
		# dimful_qty set means the factor is computed later (set_dimful_factor);
		# unset means the trivial factor 1.0.
		if dimful_qty is None:
			self.dimful_qty = None
			self.dimful_factor = 1.0
		elif isinstance(dimful_qty, str):
			self.dimful_qty = dimful_qty
			self.dimful_factor = None
		else:
			raise TypeError("Argument dimful_qty must be a string instance or None")
		if isinstance(unit_dimful, str) or unit_dimful is None:
			self.unit_dimful = unit_dimful
		else:
			raise TypeError("Argument unit_dimful must be a string instance or None")
		# Normalize obsid_alias to a list of strings
		if obsid_alias is None:
			self.obsid_alias = []
		elif isinstance(obsid_alias, str):
			self.obsid_alias = [obsid_alias]
		elif isinstance(obsid_alias, list) and all(isinstance(alias, str) for alias in obsid_alias):
			self.obsid_alias = obsid_alias
		else:
			raise TypeError("Argument obsid_alias must be a string or list of strings")
		if str_dimless is None:
			str_dimless = {}
		elif not isinstance(str_dimless, dict):
			raise TypeError("Argument str_dimless must be a dict instance or None")
		if str_dimful is None:
			str_dimful = {}
		elif not isinstance(str_dimful, dict):
			raise TypeError("Argument str_dimful must be a dict instance or None")
		self.str_dimless = str_dimless
		# Fall back to dimensionless strings if no dimensionful strings given.
		# NOTE(review): this aliases the same dict object, so a later mutation
		# of str_dimless would also affect str_dimful — confirm intended.
		if len(str_dimful) == 0 and len(str_dimless) > 0:
			self.str_dimful = str_dimless
		else:
			self.str_dimful = str_dimful
		if minmax is None:
			self.minmax = [-1.0, 1.0]
		elif isinstance(minmax, (int, float, np.integer, np.floating)):
			self.minmax = [-abs(minmax), abs(minmax)]
		elif isinstance(minmax, list) and len(minmax) == 2:
			self.minmax = [float(minmax[0]), float(minmax[1])]
		else:
			raise TypeError("Argument minmax must be a number or a list of two numbers")
		if colordata is None:
			self.colordata = 'symmobs'  # default
		elif isinstance(colordata, str):
			self.colordata = colordata
		else:
			raise TypeError("Argument colordata must be a string instance or None")
+
+	def to_str(self, style = None, dimful = False, index_from = None):
+		"""Get string representation of the observable
+
+		Arguments:
+		style       String or None. If set, one of the formatting styles. If
+		            None, return the observable id.
+		dimful      True or False. Whether to use the dimensionful (True) or
+		            dimensionless (False) variety.
+		index_from  None or string. If set, extract a replacement value for '%i'
+		            from the string. This is applied to observables of types
+		            mat_indexed and params_indexed only.
+
+		Returns:
+		String.
+		"""
+		if dimful and isinstance(style, str) and style in self.str_dimful:
+			s = self.str_dimful[style]
+		elif not dimful and isinstance(style, str) and style in self.str_dimless:
+			s = self.str_dimless[style]
+		else:
+			s = self.obsid
+		if self.obsfun_type in ['mat_indexed', 'params_indexed']:
+			idx = None if index_from is None else get_index_from_obs_string(index_from)
+			if '%i' in s:
+				return s.replace('%i', '?') if idx is None else (s % idx)
+			elif '[]' in s:
+				return s.replace('[]', '[?]') if idx is None else s.replace('[]', '[%i]' % idx)
+			else:
+				return s
+		else:
+			return s
+
+	def get_unit(self, dimful = False):
+		"""Get unit of the observable (unformatted).
+
+		Arguments:
+		dimful  True or False. Whether to use the dimensionful (True) or
+		        dimensionless (False) variety.
+
+		Returns:
+		String.
+		"""
+		return self.unit_dimful if dimful and self.unit_dimful is not None else self.unit_dimless
+
+	def get_unit_str(self, style = None, dimful = False, negexp = True):
+		"""Get unit of the observable (formatted).
+
+		Arguments:
+		style   String or None. If set, one of the formatting styles. If None,
+		        return the observable id.
+		dimful  True or False. Whether to use the dimensionful (True) or
+		        dimensionless (False) variety.
+		negexp  True or False. If True, style quotients using negative exponents
+		        (e.g., 'm s^-1'). If False, use a slash notation (e.g., 'm/s').
+
+		Returns:
+		String.
+		"""
+		raw_unit_str = self.get_unit(dimful = dimful)
+		return format_unit(raw_unit_str, style = style, negexp = negexp)
+
+	def get_range(self, dimful = False):
+		"""Get minimum and maximum value for a colormap.
+
+		Argument:
+		dimful  True or False. Whether to use the dimensionful (True) or
+		        dimensionless (False) variety.
+
+		Returns:
+		List of two numbers.
+		"""
+		if dimful:
+			if self.dimful_factor is None:
+				sys.stderr.write("Warning (Observable.get_range): Dimensional factor has not been initialized (for observable %s).\n" % self.obsid)
+				return self.minmax
+			return [self.minmax[0] * self.dimful_factor, self.minmax[1] * self.dimful_factor]
+		else:
+			return self.minmax
+
	def __str__(self):
		"""Return the observable id as plain string."""
		return self.obsid
+
	def __repr__(self):
		"""Return an unambiguous representation with type 'Observable' and the obsid."""
		return "<Observable '%s'>" % self.obsid
+
+	def set_dimful_factor(self, param = None, value = None):
+		"""Set dimensional factor for conversion between dimensionless and dimensionful observable.
+
+		Arguments:
+		param   PhysParams instance. Set conversion factor by extracting the
+		        value from the PhysParams instance based on the string that is
+		        set in self.dimful_qty.
+		value   Float. Set conversion factor to this value.
+
+		Note:
+		Either param or value should be set, but not both.
+
+		Returns:
+		self.dimful_factor   Value of the conversion factor.
+		"""
+		if value is not None:
+			if param is not None:
+				raise ValueError("Either argument 'param' or argument 'value' must be specified, not both.")
+			if self.dimful_qty is not None or self.dimful_factor is not None:
+				pass  # show warning
+			if not isinstance(value, (int, float, np.integer, np.floating)):
+				raise TypeError("Argument 'value' must be numeric")
+			self.dimful_factor = float(value)
+			self.dimful_qty = 'value'
+		elif param is not None:
+			if not isinstance(param, PhysParams):
+				raise TypeError("Argument 'param' must be a PhysParams instance.")
+			if self.dimful_qty is None:
+				self.dimful_factor = 1.0
+				return 1.0
+			paramdict = param.to_dict()
+			# Parse values and parameters
+			matches = re.findall(r"\s*([/\*]?)\s*([0-9.e+-]+|[a-z_]+)(\s*(\^|\*\*)\s*([+-]?[0-9]+))?", self.dimful_qty.lower())
+			self.dimful_factor = 1.0
+			if matches is None or len(matches) == 0:
+				sys.stderr.write("Warning (Observable.set_dimful_factor): Attribute 'dimful_qty' has invalid contents (for observable '%s').\n" % self.obsid)
+				return 1.0
+			for m in matches:
+				try:
+					value = float(m[1])
+				except:
+					if m[1] in paramdict:
+						try:
+							value = float(paramdict[m[1]])
+						except:
+							value = 1.0
+							sys.stderr.write("Warning (Observable.set_dimful_factor): Parameter '%s' is not numeric (for observable '%s').\n" % (m[1], self.obsid))
+					else:
+						sys.stderr.write("Warning (Observable.set_dimful_factor): '%s' is neither a value nor a valid parameter name (for observable '%s').\n" % (m[1], self.obsid))
+						self.dimful_factor = 1.0
+						return 1.0
+				power = int(m[4]) if m[3] in ['**', '^'] else 1
+				if m[0] == '/':
+					power *= -1
+				self.dimful_factor *= (value ** power)
+				# print (value, "**", power, "=", value ** power, "-->", self.dimful_factor)
+		else:
+			raise ValueError("Either argument 'param' or argument 'value' must be specified.")
+		return self.dimful_factor
+
+
class ObservableList:
	"""Container class for Observable instances.

	Attributes:
	observables   List of Observable instances.
	obsids        List of strings. The observable ids in the same order as
	              observables.
	obsids_alias  Dict instance of the form {alias: obsid, ...}, where alias and
	              obsid are both strings; obsid identifies the aliased
	              Observable.
	dimful        True, False, or None. Whether to globally consider
	              dimensionful (True) or dimensionless (False) observables. None
	              means undefined.
	"""
	def __init__(self, observables):
		if not isinstance(observables, list):
			raise TypeError("Argument for ObservableList must be a list of Observable instances")
		# Check all elements; the previous condition len(observables) > 1
		# silently accepted a single-element list of a wrong type.
		if not all(isinstance(obs, Observable) for obs in observables):
			raise TypeError("Argument for ObservableList must be a list of Observable instances")
		self.observables = observables
		self.obsids = [obs.obsid for obs in self.observables]
		self.obsids_alias = {}
		for obs in self.observables:
			for alias in obs.obsid_alias:
				self.obsids_alias[alias] = obs.obsid
		self.dimful = None

	def __getitem__(self, key):
		"""Get Observable instance by index (key is int) or observable id (key is str)."""
		if isinstance(key, int):
			return self.observables[key]
		elif isinstance(key, str):
			if '[' in key and ']' in key:  # handle indexed observables
				m = re.match(indexed_obs_regex, key)
				if m is not None:
					key = m.group(1) + '[]'
			if key in self.obsids:
				return self.observables[self.obsids.index(key)]
			elif key in self.obsids_alias:
				return self.observables[self.obsids.index(self.obsids_alias[key])]
			else:
				raise KeyError(key)
		else:
			raise TypeError

	def __iter__(self):
		"""Iterate over the Observable instances."""
		return iter(self.observables)

	def __len__(self):
		"""Number of observables in this list."""
		return len(self.observables)

	def __contains__(self, item):
		"""The 'in' operator. The item can be an Observable instance or string (observable id)."""
		if isinstance(item, Observable):
			return item in self.observables
		elif isinstance(item, str):
			if '[' in item and ']' in item:  # handle indexed observables
				m = re.match(indexed_obs_regex, item)
				if m is not None:
					item = m.group(1) + '[]'
			return item in self.obsids or item in self.obsids_alias
		else:
			raise TypeError

	def append(self, obs):
		"""Add an Observable instance. Duplicates (by obsid) are refused with a warning."""
		if not isinstance(obs, Observable):
			raise TypeError
		if obs.obsid in self.obsids:
			sys.stderr.write("Warning (ObservableList.append): Cannot add an observable with duplicate obsid '%s'.\n" % obs.obsid)
			return  # refuse the duplicate, as the warning announces
		# Keep observables and obsids in sync; previously the Observable itself
		# was never appended to self.observables, only its id to self.obsids.
		self.observables.append(obs)
		self.obsids.append(obs.obsid)
		for alias in obs.obsid_alias:
			self.obsids_alias[alias] = obs.obsid

	def extend(self, other):
		"""Extend present instance by another ObservableList instance or by a list of Observable instances."""
		if isinstance(other, ObservableList) or (isinstance(other, list) and all(isinstance(o, Observable) for o in other)):
			for obs in other:
				self.append(obs)  # not the most efficient, but safe
		else:
			raise TypeError("Second argument must be a list of Observables or an ObservableList.")

	def __iadd__(self, other):
		"""In-place addition; alias for self.extend(other)."""
		self.extend(other)
		return self

	def set_dimful_factor(self, param = None, value = None):
		"""Set dimensionful factor for all observables.
		See Observable.set_dimful_factor() for more information.

		Returns:
		List of conversion factors, one per observable.
		"""
		return [obs.set_dimful_factor(param = param, value = value) for obs in self.observables]

	def get_dim_factor(self, obs = None, dimful = None):
		"""Get dimensionful factor.

		Arguments:
		obs     Integer, string, or None. If integer, get the value for the
		        observable at that index. If string, get the value for the
		        observable with that observable id. If None, get a list of
		        values for all observables.
		dimful  True, False, or None. Get the value for dimensionful observables
		        (True) or dimensionless observables (False; always yields 1.0).
		        If None, use the value self.dimful set in the present
		        ObservableList instance.

		Returns:
		Float or list of floats.
		"""
		if dimful is None:
			dimful = self.dimful
		if obs is None:
			return [o.dimful_factor if dimful else 1.0 for o in self.observables]
		elif obs in self:
			return self[obs].dimful_factor if dimful else 1.0
		else:
			return 1.0

	def initialize(self, param = None, dimful = None):
		"""Initialize the present ObservableList instance.
		This initializes the dimensionful factors and sets the dimful attribute.

		Arguments:
		param   PhysParams instance. Extract conversion factors from this
		        PhysParams instance. See Observable.set_dimful_factor() for more
		        information.
		dimful  True, False, or None. New value for the dimful attribute. If
		        None, fall back to False with a warning.
		"""
		if dimful is True or dimful is False:
			self.dimful = dimful
		elif dimful is None:
			sys.stderr.write("Warning (ObservableList.initialize): Attribute 'dimful' is set to default value False.\n")
			self.dimful = False
		self.set_dimful_factor(param = param)
+
+### OBSERVABLE FUNCTIONS ###
+
def obs_y(nz, ny, norb = 6):
	"""Observable <y>, function type 'none'."""
	# Dimensionless y coordinate in [-0.5, 0.5], one value per y lattice point
	ycoord = np.arange(0, ny, dtype = float) / (ny - 1) - 0.5
	diag = np.repeat(ycoord, norb * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_y2(nz, ny, norb = 6):
	"""Observable <y^2>, function type 'none'."""
	# Square of the dimensionless y coordinate in [-0.5, 0.5]
	ycoord = np.arange(0, ny, dtype = float) / (ny - 1) - 0.5
	diag = np.repeat(ycoord**2, norb * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_z(nz, ny, norb = 6):
	"""Observable <z>, function type 'none'."""
	# Dimensionless z coordinate in [-0.5, 0.5], one value per z lattice point
	zcoord = np.arange(0, nz, dtype = float) / (nz - 1) - 0.5
	diag = np.tile(np.repeat(zcoord, norb), ny)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_z2(nz, ny, norb = 6):
	"""Observable <z^2>, function type 'none'."""
	# Square of the dimensionless z coordinate in [-0.5, 0.5]
	zcoord = np.arange(0, nz, dtype = float) / (nz - 1) - 0.5
	diag = np.tile(np.repeat(zcoord**2, norb), ny)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_z_if(nz, ny, params):
	"""Observable <z_interface>, function type 'params'."""
	z_bot, z_top = params.well_z()
	norb = params.norbitals
	dim = norb * ny * nz
	if z_bot is None or z_top is None:
		return csc_matrix((dim, dim))  # zero matrix if the well is undefined
	z = np.arange(0, nz, dtype = float)
	# Distance (in lattice points) to the nearest well interface, scaled by nz - 1
	dist_if = np.minimum(z - z_bot, z_top - z) / (nz - 1)
	diag = np.tile(np.repeat(dist_if, norb), ny)
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_z_if2(nz, ny, params):
	"""Observable <z_interface^2>, function type 'params'."""
	z_bot, z_top = params.well_z()
	norb = params.norbitals
	dim = norb * ny * nz
	if z_bot is None or z_top is None:
		return csc_matrix((dim, dim))  # zero matrix if the well is undefined
	z = np.arange(0, nz, dtype = float)
	# Squared distance to the nearest well interface, scaled by nz - 1
	dist_if = np.minimum(z - z_bot, z_top - z) / (nz - 1)
	diag = np.tile(np.repeat(dist_if**2, norb), ny)
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def in_zrange(nz, ny, z1, z2, norb = 6):
	"""Helper function for defining an observable for getting probability in a range (z1, z2).

	Arguments:
	nz    Integer. Number of lattice points in the z direction. Extract this
	      from a PhysParams instance.
	ny    Integer. Number of lattice points in the y direction. Extract this
	      from a PhysParams instance.
	z1    Integer. Coordinate in lattice points of the lower bound of the
	      interval.
	z2    Integer. Coordinate in lattice points of the upper bound of the
	      interval.
	norb  Integer. Number of orbitals.

	Returns:
	A scipy.sparse.csc_matrix instance; the zero matrix if z1 or z2 is None.
	"""
	dim = norb * ny * nz
	if z1 is None or z2 is None:
		return csc_matrix((dim, dim))  # zero matrix
	z = np.arange(0, nz, dtype = float)
	# Indicator function of the closed interval [z1, z2]
	indicator = ((z >= z1) & (z <= z2)).astype(float)
	diag = np.tile(np.repeat(indicator, norb), ny)
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def near_z(nz, ny, zval, d, norb = 6, relative = False):
	"""Helper function for defining an observable for getting probability near z.
	'Near z' means the interval [zval - d, zval + d].

	Arguments:
	nz        Integer. Number of lattice points in the z direction. Extract this
	          from a PhysParams instance.
	ny        Integer. Number of lattice points in the y direction. Extract this
	          from a PhysParams instance.
	zval      Number or list of numbers. Coordinate(s) in lattice points of the
	          center(s) of the interval(s).
	d         Integer. Half-width of the interval in lattice points.
	norb      Integer. Number of orbitals.
	relative  True or False. If False, get an observable for the probability
	          density. If True, get an observable for the probability density
	          divided by the uniform probability density.

	Returns:
	A scipy.sparse.csc_matrix instance.
	"""
	if isinstance(zval, (int, float, np.integer, np.floating)):
		zval = [zval]
	z = np.arange(0, nz, dtype = float)
	# Points strictly inside any interval carry weight 1; exact boundary points 1/2
	inside = np.any([np.abs(z - z0) < d for z0 in zval], axis = 0)
	boundary = np.any([np.abs(z - z0) == d for z0 in zval], axis = 0)
	weight = np.where(inside, np.ones_like(z), np.zeros_like(z))
	weight += 0.5 * np.where(boundary & ~inside, np.ones_like(z), np.zeros_like(z))
	if relative:
		# Normalize by the uniform density (mean weight); skip if identically zero
		mean_weight = np.sum(weight) / nz
		if mean_weight != 0.0:
			weight /= mean_weight
	diag = np.tile(np.repeat(weight, norb), ny)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_well(nz, ny, params):
	"""Observable <well>, function type 'params'."""
	z_bot, z_top = params.well_z()
	norb = params.norbitals
	if z_bot is None or z_top is None:
		# Well undefined: zero matrix of the full size
		return csc_matrix((norb * ny * nz, norb * ny * nz))
	return in_zrange(nz, ny, z_bot, z_top, norb)
+
def obs_wellext(nz, ny, params):
	"""Observable <well +/- 2 nm>, function type 'params'."""
	z_bot, z_top = params.well_z(extend_nm = 2.0)
	norb = params.norbitals
	if z_bot is None or z_top is None:
		# Extended well undefined: zero matrix of the full size
		return csc_matrix((norb * ny * nz, norb * ny * nz))
	return in_zrange(nz, ny, z_bot, z_top, norb)
+
def obs_interface_1nm(nz, ny, params):
	"""Observable 'interface density', 1 nm, function type 'params'."""
	d_points = 1.0 / params.zres  # 1 nm converted to lattice points
	return near_z(nz, ny, params.zinterface, d_points, norb = params.norbitals, relative = False)
+
def obs_interface_char_1nm(nz, ny, params):
	"""Observable 'interface character', 1 nm, function type 'params'."""
	d_points = 1.0 / params.zres  # 1 nm converted to lattice points
	return near_z(nz, ny, params.zinterface, d_points, norb = params.norbitals, relative = True)
+
def obs_interface_10nm(nz, ny, params):
	"""Observable 'interface density', 10 nm, function type 'params'."""
	d_points = 10.0 / params.zres  # 10 nm converted to lattice points
	return near_z(nz, ny, params.zinterface, d_points, norb = params.norbitals, relative = False)
+
def obs_interface_char_10nm(nz, ny, params):
	"""Observable 'interface character', 10 nm, function type 'params'."""
	d_points = 10.0 / params.zres  # 10 nm converted to lattice points
	return near_z(nz, ny, params.zinterface, d_points, norb = params.norbitals, relative = True)
+
def obs_interface_custom(nz, ny, params, length):
	"""Observable 'interface density', custom length in nm, function type 'params_indexed'."""
	return near_z(nz, ny, params.zinterface, length / params.zres, norb = params.norbitals, relative = False)  # d = length / params.zres
+
def obs_interface_char_custom(nz, ny, params, length):
	"""Observable 'interface character', custom length in nm, function type 'params_indexed'."""
	return near_z(nz, ny, params.zinterface, length / params.zres, norb = params.norbitals, relative = True)  # d = length / params.zres
+
def obs_split(nz, ny, norb = 6):
	"""Observable <H_split>, function type 'none'."""
	s8 = np.array([1., -1., 1., 1., -1., -1., 1., -1.])
	s6 = np.array([1., -1., 1., 1., -1., -1.])
	diag = np.tile(s8 if norb == 8 else s6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_totalspinz(nz, ny, norb = 6):
	"""Observable <Jz>, function type 'none'."""
	jz8 = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5, 0.5, -0.5])
	jz6 = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5])
	diag = np.tile(jz8 if norb == 8 else jz6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_totalspinx(nz, ny, norb = 6):
	"""Observable <Jx>, function type 'none'."""
	jx = spinmat.jxmat[:norb, :norb]  # restrict to the active orbitals
	return blockdiag(jx, ny * nz).tocsc()
+
def obs_totalspiny(nz, ny, norb = 6):
	"""Observable <Jy>, function type 'none'."""
	jy = spinmat.jymat[:norb, :norb]  # restrict to the active orbitals
	return blockdiag(jy, ny * nz).tocsc()
+
def obs_properspinz(nz, ny, norb = 6):
	"""Observable <Sz>, function type 'none'."""
	sz = spinmat.szmat[:norb, :norb]  # restrict to the active orbitals
	return blockdiag(sz, ny * nz).tocsc()
+
def obs_properspinx(nz, ny, norb = 6):
	"""Observable <Sx>, function type 'none'."""
	sx = spinmat.sxmat[:norb, :norb]  # restrict to the active orbitals
	return blockdiag(sx, ny * nz).tocsc()
+
def obs_properspiny(nz, ny, norb = 6):
	"""Observable <Sy>, function type 'none'."""
	sy = spinmat.symat[:norb, :norb]  # restrict to the active orbitals
	return blockdiag(sy, ny * nz).tocsc()
+
def obs_signspinz(nz, ny, norb = 6):
	"""Observable <sgn(Sz)>, function type 'none'."""
	sgn8 = np.array([1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0])
	sgn6 = np.array([1.0, -1.0, 1.0, 1.0, -1.0, -1.0])
	diag = np.tile(sgn8 if norb == 8 else sgn6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_spinz6(nz, ny, norb = 6):
	"""Observable <Jz P_Gamma6>, function type 'none'."""
	jz8 = np.array([0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
	jz6 = np.array([0.5, -0.5, 0.0, 0.0, 0.0, 0.0])
	diag = np.tile(jz8 if norb == 8 else jz6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_spinz8(nz, ny, norb = 6):
	"""Observable <Jz P_Gamma8>, function type 'none'."""
	jz8 = np.array([0.0, 0.0, 1.5, 0.5, -0.5, -1.5, 0.0, 0.0])
	jz6 = np.array([0.0, 0.0, 1.5, 0.5, -0.5, -1.5])
	diag = np.tile(jz8 if norb == 8 else jz6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_spinz7(nz, ny, norb = 6):
	"""Observable <Jz P_Gamma7>, function type 'none'."""
	jz8 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, -0.5])
	jz6 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])  # Gamma7 absent for norb != 8
	diag = np.tile(jz8 if norb == 8 else jz6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_y_spinz(nz, ny, norb = 6):
	"""Observable <y Jz>, function type 'none'."""
	ycoord = np.arange(0, ny, dtype = float) / (ny - 1) - 0.5
	jz8 = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5, 0.5, -0.5])
	jz6 = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5])
	# Jz per z-site repeated over z points, then multiplied with y via Kronecker product
	jz_sites = np.tile(jz8 if norb == 8 else jz6, nz)
	diag = np.kron(ycoord, jz_sites)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital(nz, ny, norb = 6):
	"""Observable <P_Gamma6 - P_Gamma8>, function type 'none'."""
	p8 = np.array([1., 1., -1., -1., -1., -1., 0., 0.])
	p6 = np.array([1., 1., -1., -1., -1., -1.])
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_gamma6(nz, ny, norb = 6):
	"""Observable <P_Gamma6>, function type 'none'."""
	p8 = np.array([1., 1., 0., 0., 0., 0., 0., 0.])
	p6 = np.array([1., 1., 0., 0., 0., 0.])
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_gamma8(nz, ny, norb = 6):
	"""Observable <P_Gamma8>, function type 'none'."""
	p8 = np.array([0., 0., 1., 1., 1., 1., 0., 0.])
	p6 = np.array([0., 0., 1., 1., 1., 1.])
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_gamma8h(nz, ny, norb = 6):
	"""Observable <P_Gamma8H>, function type 'none'."""
	p8 = np.array([0., 0., 1.0, 0., 0., 1.0, 0., 0.])
	p6 = np.array([0., 0., 1.0, 0., 0., 1.0])
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_gamma8l(nz, ny, norb = 6):
	"""Observable <P_Gamma8L>, function type 'none'."""
	p8 = np.array([0., 0., 0., 1.0, 1.0, 0., 0., 0.])
	p6 = np.array([0., 0., 0., 1.0, 1.0, 0.])
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_gamma7(nz, ny, norb = 6):
	"""Observable <P_Gamma7>, function type 'none'."""
	p8 = np.array([0., 0., 0., 0., 0., 0., 1., 1.])
	p6 = np.array([0., 0., 0., 0., 0., 0.])  # Gamma7 absent for norb != 8
	diag = np.tile(p8 if norb == 8 else p6, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_orbital_j(nz, ny, norb, j):
	"""Observable <P_orbital(j)>; function type 'mat_indexed'.

	Arguments:
	nz, ny  Integers. Numbers of lattice points in z and y direction.
	norb    Integer. Number of orbitals.
	j       Integer. Orbital index, 1-based; must lie in [1, ..., norb].

	Returns:
	A scipy.sparse.csc_matrix instance.

	Raises:
	ValueError if j is out of range. (Previously, only an error was printed and
	j <= 0 silently selected a wrong orbital via negative indexing.)
	"""
	if j < 1 or j > norb:
		sys.stderr.write("ERROR (obs_orbital_j): Band index out of range [1, ..., norb]\n")
		raise ValueError("Band index out of range [1, ..., norb]")
	uvec = np.zeros(norb)
	uvec[j - 1] = 1.0  # unit vector selecting orbital j
	diag = np.tile(uvec, ny * nz)
	dim = norb * ny * nz
	return dia_matrix((np.array([diag]), 0), shape = (dim, dim)).tocsc()
+
def obs_hdiag(h_block, nz, ny, params, magn = None):
	"""Helper function for block-diagonal observable matrix

	Arguments:
	h_block   Callable from hamiltonian.blocks.
	nz        NOT USED
	ny        Integer. Number of lattice points in y direction.
	params    PhysParams instance.
	magn      Float, Vector instance or None. If None, ignore, otherwise pass it
	          as keyword argument to h_block.

	Returns:
	A scipy.sparse.csc_matrix instance. The full matrix that can be used as
	observable.
	"""
	# Forward magn only if it was given
	kwds = {} if magn is None else {'magn': magn}
	zblock = hz_block_diag(h_block, params, **kwds)
	return blockdiag(zblock, ny).tocsc()
+
def obs_hexch(nz, ny, params, magn):
	"""Observable <H_exch>, function type 'params_magn'."""
	# Exchange Hamiltonian block evaluated at the given magnetic field
	return obs_hdiag(hexchange, nz, ny, params, magn = magn)
+
def obs_hexch1t(nz, ny, params):
	"""Observable <H_exch> at 1T (in z direction), function type 'params'."""
	# Exchange Hamiltonian block evaluated at a fixed field of 1 T
	return obs_hdiag(hexchange, nz, ny, params, magn = 1.0)
+
def obs_hexchinf(nz, ny, params):
	"""Observable <H_exch> in large field limit (in z direction), function type 'params'."""
	# Exchange Hamiltonian block evaluated in the infinite-field limit
	return obs_hdiag(hexchange, nz, ny, params, magn = np.inf)
+
def obs_hzeeman(nz, ny, params, magn):
	"""Observable <H_zeeman>, function type 'params_magn'."""
	# Zeeman Hamiltonian block evaluated at the given magnetic field
	return obs_hdiag(hzeeman, nz, ny, params, magn = magn)
+
def obs_hzeeman1t(nz, ny, params):
	"""Observable <H_zeeman> at 1T (in z direction), function type 'params'."""
	# Zeeman Hamiltonian block evaluated at a fixed field of 1 T
	return obs_hdiag(hzeeman, nz, ny, params, magn = 1.0)
+
def obs_hstrain(nz, ny, params):
	"""Observable <H_strain>, function type 'params'."""
	# Strain Hamiltonian block; no magnetic field dependence
	return obs_hdiag(hstrain, nz, ny, params)
+
def obs_llindex(nz, ny, norb):
	"""Observable <LL index> (for full LL mode), function type 'none'."""
	# In full LL mode, the y axis enumerates Landau levels starting at -2
	ll_idx = np.arange(0, ny) - 2
	diag = np.repeat(ll_idx, nz * norb)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_llindex_mod2(nz, ny, norb):
	"""Observable <LL index mod 2> (for full LL mode), function type 'none'."""
	# LL index starts at -2; reduce modulo 2
	ll_idx = np.mod(np.arange(0, ny) - 2, 2)
	diag = np.repeat(ll_idx, nz * norb)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_llindex_mod4(nz, ny, norb):
	"""Observable <LL index mod 4> (for full LL mode), function type 'none'."""
	# LL index starts at -2; reduce modulo 4
	ll_idx = np.mod(np.arange(0, ny) - 2, 4)
	diag = np.repeat(ll_idx, nz * norb)
	dim = norb * ny * nz
	return dia_matrix((diag[np.newaxis, :], 0), shape = (dim, dim)).tocsc()
+
def obs_ll_j(nz, ny, norb, j):
	"""Observable <P_ll(j)> (for full LL mode); undefined function type - NOT USED

	Arguments:
	nz, norb  Integers. Numbers of z lattice points and orbitals.
	ny        Integer. In full LL mode, counts the Landau levels; the maximal
	          LL index is ny - 3.
	j         Integer. LL index; must lie in [-2, ..., ny - 3].

	Returns:
	A scipy.sparse.csc_matrix instance.

	Raises:
	ValueError if j is out of range. (Previously, only an error with the wrong
	function name was printed and the index wrapped around silently.)
	"""
	if j < -2 or j > ny - 3:  # ll_max = ny - 3
		sys.stderr.write("ERROR (obs_ll_j): LL index out of range [-2, ..., llmax]\n")
		raise ValueError("LL index out of range [-2, ..., llmax]")
	uvec = np.zeros(ny, dtype = float)
	uvec[j + 2] = 1.0  # unit vector selecting Landau level j (offset by 2)
	diag = np.repeat(uvec, nz * norb)
	dim = norb * ny * nz
	return dia_matrix((np.array([diag]), 0), shape = (dim, dim)).tocsc()
+
def llindex_max(eivec, nz, ny, norb):
	"""Observable LL index 'by maximum', function type 'eivec'.

	Returns the LL index (offset by -2) whose subvector of the eigenvector
	carries the largest weight.
	"""
	blocksize = nz * norb  # length of the subvector belonging to one LL
	weights = np.array([np.vdot(eivec[blocksize * l:blocksize * (l + 1)], eivec[blocksize * l:blocksize * (l + 1)]) for l in range(0, ny)])
	return np.argmax(np.abs(weights)) - 2
+
def llindex_kwds(nz, ny, llindex = None, **kwds):
	"""Observable LL index, function type 'kwds'.

	Returns the LL index passed in by the caller; other keyword arguments are
	accepted and ignored.

	Raises:
	ValueError if the keyword argument 'llindex' is missing or None.
	"""
	if llindex is None:
		raise ValueError("Keyword argument 'llindex' must not be None")
	return llindex
+
+# IPR-like quantities
+# The inverse participation ratio (IPR) is defined in terms of the second and
+# fourth moment (m2 and m4, respectively) of the spatial wave functions,
+# basically m2**2 / m4.
+# Here, we provide a scale and resolution invariant definition. The results are
+# dimensionless by definition, but may be multiplied by the sample size (length
+# for iprz and ipry, area for ipryz) to get a dimensionful physical quantity.
+# Note that here, we (should) always have m2 = 1.
def ipr_z(eivec, nz, ny, norb):
	"""Observable IPR_z, function type 'eivec'.

	Scale-invariant inverse participation ratio along z: m2^2 / m4 / nz, where
	m2 and m4 are the second and fourth moments of |psi|^2 summed over y and
	orbitals.
	"""
	density = (eivec.conjugate() * eivec).reshape(ny, nz, norb)  # elementwise, not matmul
	rho_z = density.sum(axis = (0, 2))  # probability density per z point
	return np.sum(rho_z)**2 / np.sum(rho_z**2) / nz
+
def ipr_y(eivec, nz, ny, norb):
	"""Observable IPR_y, function type 'eivec'.

	Scale-invariant inverse participation ratio along y: m2^2 / m4 / ny, where
	m2 and m4 are the second and fourth moments of |psi|^2 summed over z and
	orbitals.
	"""
	density = (eivec.conjugate() * eivec).reshape(ny, nz, norb)  # elementwise, not matmul
	rho_y = density.sum(axis = (1, 2))  # probability density per y point
	return np.sum(rho_y)**2 / np.sum(rho_y**2) / ny
+
def ipr_yz(eivec, nz, ny, norb):
	"""Observable IPR_yz, function type 'eivec'.

	Scale-invariant inverse participation ratio in the (y, z) plane:
	m2^2 / m4 / (ny nz), where m2 and m4 are the second and fourth moments of
	|psi|^2 summed over orbitals.
	"""
	density = (eivec.conjugate() * eivec).reshape(ny * nz, norb)  # elementwise, not matmul
	rho_yz = density.sum(axis = 1)  # probability density per (y, z) point
	return np.sum(rho_yz)**2 / np.sum(rho_yz**2) / ny / nz
+
+### Derived parity functions
+# parity_{x,y,z}() are taken from hamiltonian/parity.py
def isoparity_z(par1, par2 = None, norb = 6):
	"""Isoparity in z. See hamiltonian/parity.py for more information.

	Thin wrapper around parity_z() with isoparity = True.
	"""
	return parity_z(par1, par2, norb, isoparity = True)
+
def isoparity_z_well(nz, ny, params):
	"""Isoparity in z applied to the well only. See hamiltonian/parity.py for more information."""
	norb = params.norbitals
	z_bot, z_top = params.well_z()
	if z_bot is None or z_top is None:
		# Well layer undefined: return a zero matrix of the full size
		dim = norb * ny * nz
		return csc_matrix((dim, dim))
	return parity_z(nz, ny, norb, isoparity = True, zrange = (z_bot, z_top))
+
def isoparity_z_symm(nz, ny, params):
	"""Isoparity in z applied to a symmetric region around the well only. See hamiltonian/parity.py for more information."""
	norb = params.norbitals
	z_bot, z_top = params.symmetric_z()
	if z_bot is None or z_top is None:
		# Symmetric region undefined: return a zero matrix of the full size
		dim = norb * ny * nz
		return csc_matrix((dim, dim))
	return parity_z(nz, ny, norb, isoparity = True, zrange = (z_bot, z_top))
+
def isoparity_x(par1, par2 = None, norb = 6):
	"""Isoparity in x. See hamiltonian/parity.py for more information.

	Thin wrapper around parity_x() with isoparity = True.
	"""
	return parity_x(par1, par2, norb, isoparity = True)
+
def isoparity_y(par1, par2 = None, norb = 6):
	"""Isoparity in y. See hamiltonian/parity.py for more information.

	Thin wrapper around parity_y() with isoparity = True.
	"""
	return parity_y(par1, par2, norb, isoparity = True)
+
def parity_zy(par1, par2 = None, norb = 6):
	"""Parity in z and y. See hamiltonian/parity.py for more information.
	The result is calculated through matrix multiplication of the two
	single-axis parity operators.
	"""
	return parity_z(par1, par2, norb, isoparity = False) @ parity_y(par1, par2, norb, isoparity = False)
+
def isoparity_zy(par1, par2 = None, norb = 6):
	"""Isoparity in z and y. See hamiltonian/parity.py for more information.
	The result is calculated through matrix multiplication of the two
	single-axis isoparity operators.
	"""
	return parity_z(par1, par2, norb, isoparity = True) @ parity_y(par1, par2, norb, isoparity = True)
+
+### OBSERVABLE DEFINITIONS ###
+all_observables = ObservableList([
+	Observable(
+		'y', obs_y, unit_dimful = 'nm', dimful_qty = 'w',
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "y/w", 'tex': r"$\langle y\rangle/w$", 'unicode': "\u27e8y\u27e9/w"},
+		str_dimful = {'plain': "y", 'tex': r"$\langle y\rangle$", 'unicode': "\u27e8y\u27e9"}),
+	Observable(
+		'y2', obs_y2, unit_dimful = 'nm^2', dimful_qty = 'w^2',
+		minmax = [0.0, 0.25], colordata = 'posobs',
+		obsid_alias = "y^2",
+		str_dimless = {'plain': "(y/w)^2", 'tex': r"$\langle y^2\!\rangle/w^2$", 'unicode': "\u27e8y\xb2\u27e9/w\xb2"},
+		str_dimful = {'plain': "y^2", 'tex': r"$\langle y^2\!\rangle$", 'unicode': "\u27e8y\xb2\u27e9"}),
+	Observable(
+		'sigmay', None, unit_dimful = 'nm^2', dimful_qty = 'w',
+		minmax = [0.0, 0.5], colordata = 'posobs',
+		obsid_alias = "sigma_y",
+		str_dimless = {'plain': "sigma_y/w", 'tex': r"$\sigma_y/w$", 'unicode': "\u03c3_y/w"},
+		str_dimful = {'plain': "sigma_y", 'tex': r"$\sigma_y$", 'unicode': "\u03c3_y"}),
+	Observable(
+		'z', obs_z, unit_dimful = 'nm', dimful_qty = 'd',
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "z/d", 'tex': r"$\langle z\rangle/d$", 'unicode': "\u27e8z\u27e9/w"},
+		str_dimful = {'plain': "z", 'tex': r"$\langle z\rangle$", 'unicode': "\u27e8z\u27e9"}),
+	Observable(
+		'z2', obs_z2, unit_dimful = 'nm^2', dimful_qty = 'd^2',
+		minmax = [0.0, 0.25], colordata = 'posobs',
+		obsid_alias = "z^2",
+		str_dimless = {'plain': "(z/d)^2", 'tex': r"$\langle z^2\!\rangle/d^2$", 'unicode': "\u27e8z\xb2\u27e9/d\xb2"},
+		str_dimful = {'plain': "z^2", 'tex': r"$\langle z^2\!\rangle$", 'unicode': "\u27e8z\xb2\u27e9/d\xb2"}),
+	Observable(
+		'sigmaz', None, unit_dimful = 'nm^2', dimful_qty = 'd',
+		minmax = [0.0, 0.5], colordata = 'posobs',
+		obsid_alias = "sigma_z",
+		str_dimless = {'plain': "sigma_z/d", 'tex': r"$\sigma_z/d$", 'unicode': "\u03c3_z/d"},
+		str_dimful = {'plain': "sigma_z", 'tex': r"$\sigma_z$", 'unicode': "\u03c3_z"}),
+	Observable(
+		'zif', obs_z_if, obsfun_type = 'params', unit_dimful = 'nm', dimful_qty = 'd',
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		obsid_alias = "z_if",
+		str_dimless = {'plain': "z_if/d", 'tex': r"$\langle z_\mathrm{if}\rangle/d$", 'unicode': "\u27e8z_if\u27e9/d"},
+		str_dimful = {'plain': "z_if", 'tex': r"$\langle z_\mathrm{if}\rangle$", 'unicode': "\u27e8z_if\u27e9"}),
+	Observable(
+		'zif2', obs_z_if2, obsfun_type = 'params', unit_dimful = 'nm^2', dimful_qty = 'd^2',
+		minmax = [0.0, 0.25], colordata = 'posobs',
+		obsid_alias = ["z_if2", "zif^2", "z_if^2"],
+		str_dimless = {'plain': "(z_if/d)^2", 'tex': r"$\langle z_\mathrm{if}^2\!\rangle/d^2$", 'unicode': "\u27e8z_if\xb2\u27e9/d\xb2"},
+		str_dimful = {'plain': "z_if^2", 'tex': r"$\langle z_\mathrm{if}^2\!\rangle$", 'unicode': "\u27e8z_if\xb2\u27e9"}),
+	Observable(
+		'sigmazif', None, unit_dimful = 'nm^2', dimful_qty = 'w',
+		minmax = [0.0, 0.5], colordata = 'posobs',
+		obsid_alias = ['sigmaz_if', 'sigma_zif', 'sigma_z_if'],
+		str_dimless = {'plain': "sigma_zif/w", 'tex': r"$\sigma_{z_\mathrm{if}}/d$", 'unicode': "\u03c3_zif/d"},
+		str_dimful = {'plain': "sigma_zif", 'tex': r"$\sigma_{z_\mathrm{if}}$", 'unicode': "\u03c3_zif"}),
+	Observable(
+		'well', obs_well, obsfun_type = 'params',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "psi^2(well)", 'tex': r"$|\psi_{\mathrm{well}}|^2$", 'unicode': "|\u03c8_well|\xb2"}),  # alternative TeX: r"$\int_{\mathrm{well}}|\psi|^2 dz$"
+	Observable(
+		'wellext', obs_wellext, obsfun_type = 'params',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		obsid_alias = ["extwell", "ext_well", "well_ext"],
+		str_dimless = {'plain': "psi^2(well+2nm)", 'tex': r"$|\psi_{\mathrm{well}\pm2\,\mathrm{nm}}|^2$", 'unicode': "|\u03c8_well\xb12nm|\xb2"}),  # alternative TeX: r"$\int_{\mathrm{well}\pm 2\,\mathrm{nm}}|\psi|^2 dz$"
+	Observable(
+		'interface', obs_interface_1nm, obsfun_type = 'params',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		obsid_alias = ["interface1nm", "interface_1nm", "if1nm", "if_1nm"],
+		str_dimless = {'plain': "psi^2(if_1nm)", 'tex': r"$|\psi_{\mathrm{if},1\,\mathrm{nm}}|^2$", 'unicode': "|\u03c8_if|\xb2 (1nm)"}),
+	Observable(
+		'interfacechar', obs_interface_char_1nm, obsfun_type = 'params',
+		minmax = [0.0, 3.0], colordata = 'posobs',
+		obsid_alias = ["interfacechar1nm", "interface_char", "interface_char_1nm", "ifchar", "if_char", "ifchar1nm", "if_char_1nm"],
+		str_dimless = {'plain': "<psi^2(if_1nm)>", 'tex': r"$\langle |\psi_{\mathrm{if},1\,\mathrm{nm}}|^2\rangle$", 'unicode': "\u27e8|\u03c8_if|\xb2\u27e9 (1nm)"}),
+	Observable(
+		'interface10nm', obs_interface_10nm, obsfun_type = 'params',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		obsid_alias = ["interface10nm", "interface_10nm", "if10nm", "if_10nm"],
+		str_dimless = {'plain': "psi^2(if_10nm)", 'tex': r"$|\psi_{\mathrm{if},10\,\mathrm{nm}}|^2$", 'unicode': "|\u03c8_if|\xb2 (10nm)"}),
+	Observable(
+		'interfacechar10nm', obs_interface_char_10nm, obsfun_type = 'params',
+		minmax = [0.0, 3.0], colordata = 'posobs',
+		obsid_alias = ["interfacechar10nm", "interface_char_10nm", "ifchar", "if_char", "ifchar10nm", "if_char_10nm"],
+		str_dimless = {'plain': "<psi^2(if_10nm)>", 'tex': r"$\langle |\psi_{\mathrm{if},10\,\mathrm{nm}}|^2\rangle$", 'unicode': "\u27e8|\u03c8_if|\xb2\u27e9 (10nm)"}),
+	Observable(
+		'custominterface[]', obs_interface_custom, obsfun_type = 'params_indexed',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "psi^2(if_%inm)", 'tex': r"$|\psi_{\mathrm{if},%i\,\mathrm{nm}}|^2$",
+		               'unicode': "|\u03c8_if|\xb2 (%inm)"}),
+	Observable(
+		'custominterfacechar[]', obs_interface_char_custom, obsfun_type = 'params_indexed',
+		minmax = [0.0, 3.0], colordata = 'posobs',
+		str_dimless = {'plain': "<psi^2(if_%inm)>", 'tex': r"$\langle |\psi_{\mathrm{if},%i\,\mathrm{nm}}|^2\rangle$",
+		               'unicode': "\u27e8|\u03c8_if|\xb2\u27e9 (%inm)"}),
+	Observable(
+		'ipry', ipr_y, obsfun_type = 'eivec', unit_dimful = 'nm', dimful_qty = 'w',
+		minmax = [0.0, 1.0], colordata = 'ipr',
+		str_dimless = {'plain': "IPR_y", 'tex': r"$\mathrm{IPR}_y$", 'unicode': "IPR_y"},
+		str_dimful = {'plain': "IPR_y", 'tex': r"$\mathrm{IPR}_y$", 'unicode': "IPR_y"}),
+	Observable(
+		'iprz', ipr_z, obsfun_type = 'eivec', unit_dimful = 'nm', dimful_qty = 'd',
+		minmax = [0.0, 1.0], colordata = 'ipr',
+		str_dimless = {'plain': "IPR_z", 'tex': r"$\mathrm{IPR}_z$", 'unicode': "IPR_z"},
+		str_dimful = {'plain': "IPR_z", 'tex': r"$\mathrm{IPR}_z$", 'unicode': "IPR_z"}),
+	Observable(
+		'ipryz', ipr_yz, obsfun_type = 'eivec', unit_dimful = 'nm^2', dimful_qty = 'd*w',
+		minmax = [0.0, 1.0], colordata = 'ipr',
+		str_dimless = {'plain': "IPR_yz", 'tex': r"$\mathrm{IPR}_{(y,z)}$", 'unicode': "IPR_yz"},
+		str_dimful = {'plain': "IPR_yz", 'tex': r"$\mathrm{IPR}_{(y,z)}$", 'unicode': "IPR_yz"}),
+	Observable(
+		'sz', obs_properspinz,
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "Sz", 'tex': r"$\langle S^z\!\rangle$", 'unicode': "\u27e8Sz\u27e9"}),
+	Observable(
+		'sx', obs_properspinx,
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "Sx", 'tex': r"$\langle S^x\!\rangle$", 'unicode': "\u27e8Sx\u27e9"}),
+	Observable(
+		'sy', obs_properspiny,
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "Sy", 'tex': r"$\langle S^y\!\rangle$", 'unicode': "\u27e8Sy\u27e9"}),
+	Observable(
+		'jz', obs_totalspinz,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spinz',
+		str_dimless = {'plain': "Jz", 'tex': r"$\langle J^z\!\rangle$", 'unicode': "\u27e8Jz\u27e9"}),
+	Observable(
+		'jx', obs_totalspinx,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spinx',
+		str_dimless = {'plain': "Jx", 'tex': r"$\langle J^x\!\rangle$", 'unicode': "\u27e8Jx\u27e9"}),
+	Observable(
+		'jy', obs_totalspiny,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spiny',
+		str_dimless = {'plain': "Jy", 'tex': r"$\langle J^y\!\rangle$", 'unicode': "\u27e8Jy\u27e9"}),
+	Observable(
+		'jz6', obs_spinz6,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spinz6',
+		str_dimless = {'plain': "Jz_Gamma6", 'tex': r"$\langle J^z P_{\Gamma_6}\!\rangle$", 'unicode': "\u27e8Jz P_\u03936\u27e9"}),
+	Observable(
+		'jz8', obs_spinz8,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spinz8',
+		str_dimless = {'plain': "Jz_Gamma8", 'tex': r"$\langle J^z P_{\Gamma_8}\!\rangle$", 'unicode': "\u27e8Jz P_\u03938\u27e9"}),
+	Observable(
+		'jz7', obs_spinz7,
+		minmax = [-1.5, 1.5], colordata = 'threehalves',
+		obsid_alias = 'spinz7',
+		str_dimless = {'plain': "Jz_Gamma7", 'tex': r"$\langle J^z P_{\Gamma_7}\!\rangle$", 'unicode': "\u27e8Jz P_\u03937\u27e9"}),
+	Observable(
+		'yjz', obs_y_spinz,
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		obsid_alias = ["yspinz", "y spinz", "y jz", "y*spinz", "y*jz"],
+		str_dimless = {'plain': "y Jz", 'tex': r"$\langle y J^z\!\rangle$", 'unicode': "\u27e8y Jz\u27e9"}),
+	Observable(
+		'split', obs_split,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		str_dimless = {'plain': "sgn Jz", 'tex': r"$\langle \mathrm{sgn}(J^z)\!\rangle$", 'unicode': "\u27e8sgn Jz\u27e9"}),
+	Observable(
+		'orbital', obs_orbital,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		str_dimless = {'plain': "orbital", 'tex': r"$\langle P_{\Gamma_6} - P_{\Gamma_8}\rangle$", 'unicode': "\u27e8P_\u03936-P_\u03938\u27e9"}),
+	Observable(
+		'gamma6', obs_orbital_gamma6,
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "Gamma6", 'tex': r"$\langle P_{\Gamma_6}\rangle$", 'unicode': "\u27e8P_\u03936\u27e9"}),
+	Observable(
+		'gamma8', obs_orbital_gamma8,
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "Gamma8", 'tex': r"$\langle P_{\Gamma_8}\rangle$", 'unicode': "\u27e8P_\u03938\u27e9"}),
+	Observable(
+		'gamma8l', obs_orbital_gamma8l,
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "Gamma8L", 'tex': r"$\langle P_{\Gamma_{8};\mathrm{LH}}\rangle$", 'unicode': "\u27e8P_\u03938L\u27e9"}),
+	Observable(
+		'gamma8h', obs_orbital_gamma8h,
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "Gamma8H", 'tex': r"$\langle P_{\Gamma_{8};\mathrm{HH}}\rangle$", 'unicode': "\u27e8P_\u03938H\u27e9"}),
+	Observable(
+		'gamma7', obs_orbital_gamma7,
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "Gamma7", 'tex': r"$\langle P_{\Gamma_7}$", 'unicode': "\u27e8P_\u03937\u27e9"}),
+	Observable(
+		'orbital[]', obs_orbital_j, obsfun_type = 'mat_indexed',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "orbital[%i]", 'tex': r"$\langle P_{\mathrm{orb}\,%i}\rangle$", 'unicode': "\u27e8P_o%i\u27e9"}),
+	Observable(
+		'px', parity_x,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'parx',
+		str_dimless = {'plain': "Px", 'tex': r"$\langle \mathcal{P}_x\rangle$", 'unicode': "\u27e8Px\u27e9"}),
+	Observable(
+		'isopx', isoparity_x,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'isoparx',
+		str_dimless = {'plain': "Px (iso)", 'tex': r"$\langle \tilde{\mathcal{P}}_x\rangle$", 'unicode': "\u27e8Px\u27e9 (iso)"}),
+	Observable(
+		'py', parity_y,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'pary',
+		str_dimless = {'plain': "Py", 'tex': r"$\langle \mathcal{P}_y\rangle$", 'unicode': "\u27e8Py\u27e9"}),
+	Observable(
+		'isopy', isoparity_y,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'isopary',
+		str_dimless = {'plain': "Py (iso)", 'tex': r"$\langle \tilde{\mathcal{P}}_y\rangle$", 'unicode': "\u27e8Py\u27e9 (iso)"}),
+	Observable(
+		'pz', parity_z,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'parz',
+		str_dimless = {'plain': "Pz", 'tex': r"$\langle \mathcal{P}_z\rangle$", 'unicode': "\u27e8Pz\u27e9"}),
+	Observable(
+		'isopz', isoparity_z,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'isoparz',
+		str_dimless = {'plain': "Pz (iso)", 'tex': r"$\langle \tilde{\mathcal{P}}_z\rangle$", 'unicode': "\u27e8Pz\u27e9 (iso)"}),
+	Observable(
+		'isopzw', isoparity_z_well, obsfun_type = 'params',
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'isoparzw',
+		str_dimless = {'plain': "Pz (iso,well)", 'tex': r"$\langle \tilde{\mathcal{P}}_{z,\mathrm{w}}\rangle$", 'unicode': "\u27e8Pz\u27e9 (iso,well)"}),
+	Observable(
+		'isopzs', isoparity_z_symm, obsfun_type = 'params',
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = 'isoparzs',
+		str_dimless = {'plain': "Pz (iso,symm)", 'tex': r"$\langle \tilde{\mathcal{P}}_{z,\mathrm{s}}\rangle$", 'unicode': "\u27e8Pz\u27e9 (iso,symm)"}),
+	Observable(
+		'pzy', parity_zy,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = ['parzy', 'pzpy', 'pyz', 'paryz', 'pypz'],
+		str_dimless = {'plain': "Pzy", 'tex': r"$\langle \mathcal{P}_z\mathcal{P}_y\rangle$", 'unicode': "\u27e8Pz Py\u27e9"}),
+	Observable(
+		'isopzy', isoparity_zy,
+		minmax = [-1.0, 1.0], colordata = 'symmobs',
+		obsid_alias = ['isoparzy', 'isopzpy', 'isopyz', 'isoparyz', 'isopypz'],
+		str_dimless = {'plain': "Pzy (iso)", 'tex': r"$\langle \tilde{\mathcal{P}}_z\mathcal{P}_y\rangle$", 'unicode': "\u27e8Pz Py\u27e9 (iso)"}),
+	Observable(
+		'llindex', llindex_kwds, obsfun_type = 'kwds',
+		minmax = [-2.5, 17.5], colordata = 'indexed',
+		obsid_alias = ['ll_n', 'lln'],
+		str_dimless = {'plain': "n (LL)", 'tex': r"LL index $n$", 'unicode': "n (LL)"}),
+	Observable(
+		'llavg', obs_llindex,
+		minmax = [-2.5, 17.5], colordata = 'indexed',
+		str_dimless = {'plain': "<n> (LL)", 'tex': r"$\langle n\rangle$", 'unicode': "\u27e8n\u27e9 (LL)"}),
+	Observable(
+		'llmod2', obs_llindex_mod2,
+		minmax = [0.0, 1.0], colordata = 'symmobs',
+		str_dimless = {'plain': "<n mod 2> (LL)", 'tex': r"$\langle n\ \mathrm{mod}\  2\rangle$", 'unicode': "\u27e8n mod 2\u27e9 (LL)"}),
+	Observable(
+		'llmod4', obs_llindex_mod4,
+		minmax = [0.0, 3.0], colordata = 'threehalves',
+		str_dimless = {'plain': "<n mod 4> (LL)", 'tex': r"$\langle n\ \mathrm{mod}\  4\rangle$", 'unicode': "\u27e8n mod 4\u27e9 (LL)"}),
+	Observable(
+		'llbymax', llindex_max, obsfun_type = 'eivec',
+		minmax = [-2.5, 17.5], colordata = 'indexed',
+		str_dimless = {'plain': "n (maj)", 'tex': r"$n$ (majority)", 'unicode': "\u27e8n\u27e9 (maj)"}),
+	Observable(
+		'll[]', obs_ll_j, obsfun_type = 'mat_indexed',
+		minmax = [0.0, 1.0], colordata = 'posobs',
+		str_dimless = {'plain': "ll[%i]", 'tex': r"$\langle P_{\mathrm{LL}\,%i}\rangle$", 'unicode': "\u27e8P_LL%i\u27e9"}),
+	Observable(
+		'berryz', None, unit_dimless = "nm^2",
+		minmax = [-400., 400.], colordata = 'symmobs',
+		obsid_alias = 'berry',
+		str_dimless = {'plain': "Fz (Berry)", 'tex': r"$F_z$ (Berry)", 'unicode': "Fz (Berry)"}),
+	Observable(
+		'berryx', None, unit_dimless = "nm^2",
+		minmax = [-400., 400.], colordata = 'symmobs',
+		str_dimless = {'plain': "Fx (Berry)", 'tex': r"$F_x$ (Berry)", 'unicode': "Fx (Berry)"}),
+	Observable(
+		'berryy', None, unit_dimless = "nm^2",
+		minmax = [-400., 400.], colordata = 'symmobs',
+		str_dimless = {'plain': "Fy (Berry)", 'tex': r"$F_y$ (Berry)", 'unicode': "Fy (Berry)"}),
+	Observable(
+		'berryiso', None, unit_dimless = "nm^2",
+		minmax = [-400., 400.], colordata = 'symmobs',
+		obsid_alias = 'isoberry',
+		str_dimless = {'plain': "Fztilde (Berry iso)", 'tex': r"$\tilde{F}_z$ (Berry iso)", 'unicode': "Fztilde (Berry iso)"}),
+	Observable(
+		'chern', None,
+		minmax = [-3., 3.], colordata = 'symmobs',
+		str_dimless = {'plain': "C (Chern)", 'tex': r"$C$ (Chern)", 'unicode': "C (Chern)"}),
+	Observable(
+		'chernsim', None,
+		minmax = [-3., 3.], colordata = 'symmobs',
+		str_dimless = {'plain': "C (simul. Chern)", 'tex': r"$C$ (simul. Chern)", 'unicode': "C (simul. Chern)"}),
+	Observable(
+		'dedk', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "dE / dk", 'tex': r"$dE/dk$", 'unicode': "dE / dk"}),
+	Observable(
+		'dedkr', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "nabla E", 'tex': r"$\nabla E\cdot\hat{r}$", 'unicode': "\u2207E \u22c5 r"}),
+	Observable(
+		'dedkabs', None, unit_dimless = "meV nm",
+		minmax = [0., 300.], colordata = 'posobs',
+		str_dimless = {'plain': "|nabla E|", 'tex': r"$|\nabla E|$", 'unicode': "|\u2207E|"}),
+	Observable(
+		'dedkx', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "dE / dkx", 'tex': r"$dE/dk_x$", 'unicode': "dE / dkx"}),
+	Observable(
+		'dedky', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "dE / dky", 'tex': r"$dE/dk_y$", 'unicode': "dE / dky"}),
+	Observable(
+		'dedkz', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "dE / dkz", 'tex': r"$dE/dk_z$", 'unicode': "dE / dkz"}),
+	Observable(
+		'dedkphi', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "nabla E . phi", 'tex': r"$\nabla E\cdot\hat{\phi}$", 'unicode': "\u2207E \u22c5 \u03d5"}),
+	Observable(
+		'dedktheta', None, unit_dimless = "meV nm",
+		minmax = [-300., 300.], colordata = 'symmobs',
+		str_dimless = {'plain': "nabla E . theta", 'tex': r"$\nabla E\cdot\hat{\theta}$", 'unicode': "\u2207E \u22c5 \u03b8"}),
+	Observable(
+		'v', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "v", 'tex': r"$v$", 'unicode': "v"}),
+	Observable(
+		'vr', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vr", 'tex': r"$v_r$", 'unicode': "vr"}),
+	Observable(
+		'vabs', None, unit_dimless = "10^6 m/s",
+		minmax = [0.0, 0.5], colordata = 'posobs',
+		str_dimless = {'plain': "|v|", 'tex': r"$|v|$", 'unicode': "|v|"}),
+	Observable(
+		'vx', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vx", 'tex': r"$v_x$", 'unicode': "vx"}),
+	Observable(
+		'vy', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vy", 'tex': r"$v_y$", 'unicode': "vy"}),
+	Observable(
+		'vz', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vz", 'tex': r"$v_z$", 'unicode': "vz"}),
+	Observable(
+		'vphi', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vphi", 'tex': r"$v_\phi$", 'unicode': "v\u03d5"}),
+	Observable(
+		'vtheta', None, unit_dimless = "10^6 m/s",
+		minmax = [-0.5, 0.5], colordata = 'symmobs',
+		str_dimless = {'plain': "vtheta", 'tex': r"$v_\theta$", 'unicode': "v\u03b8"}),
+	Observable(
+		'hex', obs_hexch, obsfun_type = 'params_magn', unit_dimless = "meV",
+		minmax = [-15., 15.], colordata = 'symmobs',
+		obsid_alias = ["h_ex", "hexch", "h_exch"],
+		str_dimless = {'plain': "Hexch", 'tex': r"$H_\mathrm{exch}$", 'unicode': "Hexch"}),
+	Observable(
+		'hex1t', obs_hexch1t, obsfun_type = 'params', unit_dimless = "meV",
+		minmax = [-15., 15.], colordata = 'symmobs',
+		obsid_alias = ["h_ex_1t", "hexch1t", "h_exch_1t"],
+		str_dimless = {'plain': "Hexch(1T)", 'tex': r"$H_\mathrm{exch}(1\,\mathrm{T})$", 'unicode': "Hexch(1T)"}),
+	Observable(
+		'hexinf', obs_hexchinf, obsfun_type = 'params', unit_dimless = "meV",
+		minmax = [-15., 15.], colordata = 'symmobs',
+		obsid_alias = ["h_ex_inf", "hexchinf", "h_exch_inf"],
+		str_dimless = {'plain': "Hexch(inf)", 'tex': r"$H_\mathrm{exch}(\infty)$", 'unicode': "Hexch(\u221e)"}),
+	Observable(
+		'hz', obs_hzeeman, obsfun_type = 'params_magn', unit_dimless = "meV",
+		minmax = [-5., 5.], colordata = 'symmobs',
+		obsid_alias = ["h_z", "hzeeman", "h_zeeman"],
+		str_dimless = {'plain': "HZ", 'tex': r"$H_\mathrm{Z}$", 'unicode': "HZ"}),
+	Observable(
+		'hz1t', obs_hzeeman1t, obsfun_type = 'params', unit_dimless = "meV",
+		minmax = [-5., 5.], colordata = 'symmobs',
+		obsid_alias = ["h_z1t", "hzeeman1t", "h_zeeman1t"],
+		str_dimless = {'plain': "HZ(1T)", 'tex': r"$H_\mathrm{Z}(1\,\mathrm{T})$", 'unicode': "HZ(1T)"}),
+	Observable(
+		'hstrain', obs_hstrain, obsfun_type = 'params', unit_dimless = "meV",
+		minmax = [-15., 15.], colordata = 'symmobs',
+		obsid_alias = "h_strain",
+		str_dimless = {'plain': "Hstrain", 'tex': r"$H_\mathrm{strain}$", 'unicode': "Hstrain"}),
+])
+
+# Module-level latches so that each of the warnings/errors emitted by
+# observables() below is printed at most once per process.
+jwell_warning_issued = False  # set after the "well layer not identified" warning has been printed
+obs_error_issued = False  # set after the "observables could not be calculated" error has been printed
+def observables(eivecs, params, obs, llindex = None, overlap_eivec = None, magn = None):
+	"""Calculate observables from eigenvectors
+
+	Arguments:
+	eivecs            Numpy array of two dimensions.
+	params            PhysParams instance.
+	obs               List of strings. Observable ids for which to calculate the
+	                  values.
+	llindex           Integer or None. Necessary for the llindex observable.
+	observable_eivec  Dict instance, whose keys are band labels (characters) and
+	                  values are one-dimensional arrays. This is for calculating
+	                  overlaps of the current eigenvectors (eivecs) with the
+	                  values of observable_eivec.
+	magn              Float, Vector instance or None. If not None, the magnetic
+	                  field strength.
+
+	Returns:
+	Numpy array of complex numbers. The size is (nobs, neig), where nobs is the
+	number of observables and neig the number of eigenvectors. The values are
+	the observable values for the observables in obs.
+	"""
+	global obs_error_issued
+	nz = params.nz
+	ny = params.ny
+	norb = params.norbitals
+	tp = False
+	# tp ('transpose') determines whether eigenvectors are in a transposed
+	# configuration in eivec. For the standard returned data of eigsh: tp = True
+
+	if eivecs.shape[0] == norb * ny * nz:  # for 1D
+		neig = eivecs.shape[1]
+		tp = True
+	elif eivecs.shape[1] == norb * ny * nz:  # for 1D, inverted order
+		neig = eivecs.shape[0]
+	elif eivecs.shape[0] == norb * nz:  # for 2D
+		ny = 1
+		neig = eivecs.shape[1]
+		tp = True
+	elif eivecs.shape[1] == norb * nz:  # for 2D, inverted order
+		ny = 1
+		neig = eivecs.shape[0]
+	elif eivecs.shape == (norb, norb):  # for bulk
+		ny = 1
+		nz = 1
+		neig = norb
+		tp = True  # transposition is necessary, as in the other cases
+	else:
+		raise ValueError("Eigenvectors have incorrect number of components")
+
+	# Determine whether there are observables that refer to the quantum well or its interfaces
+	# If so, try to determine its layer index. If not found, raise a warning
+	well_obs = ["zif", "z_if"] + ["zif2", "z_if2", "zif^2", "z_if^2"] + ["well"] + ["extwell", "wellext", "well_ext"]
+	well_obs_present = [o for o in well_obs if o in obs]
+	if len(well_obs_present) > 0:
+		global jwell_warning_issued
+		jwell = params.layerstack.layer_index("well")
+
+		if jwell is None and not jwell_warning_issued:
+			sys.stderr.write("Warning: The well layer could not be identified. The requested observables %s have been set to 0.\n" % ", ".join(well_obs_present))
+			jwell_warning_issued = True
+
+	# Process observables
+	nobs = len(obs)
+	obsvals = np.zeros((nobs, neig), dtype = complex)
+	obs_error = []
+	for i in range(0, nobs):
+		if obs[i] in all_observables:
+			o = all_observables[obs[i]]
+			# print ("OBS", o.obsid, o.obsfun, o.obsfun_type)
+			if o.obsfun_type == 'none' or o.obsfun is None:
+				obsvals[i, :] = float("nan")
+			elif o.obsfun_type == 'mat':
+				op = o.obsfun(nz, ny, norb)
+				for j in range(0, neig):
+					v = eivecs[:, j] if tp else eivecs[j]
+					norm2 = np.real(np.vdot(v, v))
+					obsval = np.vdot(v, op.dot(v))
+					obsvals[i, j] = obsval / norm2
+			elif o.obsfun_type == 'mat_indexed':
+				idx = get_index_from_obs_string(obs[i])
+				op = o.obsfun(nz, ny, norb, idx)
+				for j in range(0, neig):
+					v = eivecs[:, j] if tp else eivecs[j]
+					norm2 = np.real(np.vdot(v, v))
+					obsval = np.vdot(v, op.dot(v))
+					obsvals[i, j] = obsval / norm2
+			elif o.obsfun_type == 'params':
+				op = o.obsfun(nz, ny, params)
+				for j in range(0, neig):
+					v = eivecs[:, j] if tp else eivecs[j]
+					norm2 = np.real(np.vdot(v, v))
+					obsval = np.vdot(v, op.dot(v))
+					obsvals[i, j] = obsval / norm2
+			elif o.obsfun_type == 'params_indexed':
+				idx = get_index_from_obs_string(obs[i])
+				op = o.obsfun(nz, ny, params, idx)
+				for j in range(0, neig):
+					v = eivecs[:, j] if tp else eivecs[j]
+					norm2 = np.real(np.vdot(v, v))
+					obsval = np.vdot(v, op.dot(v))
+					obsvals[i, j] = obsval / norm2
+			elif o.obsfun_type == 'params_magn':
+				op = o.obsfun(nz, ny, params, magn = magn)
+				for j in range(0, neig):
+					v = eivecs[:, j] if tp else eivecs[j]
+					norm2 = np.real(np.vdot(v, v))
+					obsval = np.vdot(v, op.dot(v))
+					obsvals[i, j] = obsval / norm2
+			elif o.obsfun_type == 'eivec':
+				for j in range(0, neig):
+					obsvals[i, j] = o.obsfun(eivecs[:, j] if tp else eivecs[j], nz, ny, norb)
+			elif o.obsfun_type == 'kwds':
+				obsvals[i, :] = np.array(o.obsfun(nz, ny, llindex = llindex))
+			else:
+				obs_error.append(obs[i])
+		elif overlap_eivec is not None and obs[i] in overlap_eivec:
+			# overlap with labeled eigenvector
+			w = overlap_eivec[obs[i]]
+			normw2 = np.real(np.vdot(w, w))
+			for j in range(0, neig):
+				v = eivecs[:, j] if tp else eivecs[j]
+				normv2 = np.real(np.vdot(v, v))
+				if len(w) == nz * norb and ny > 1:
+					obsvals[i, j] = 0
+					size = nz * norb
+					for m in range(0, ny):
+						overlap = np.vdot(w, v[m * size: (m+1) * size])
+						obsvals[i, j] += np.abs(overlap) ** 2 / normv2 / normw2
+				else:
+					overlap = np.vdot(w, v)
+					obsvals[i, j] = np.abs(overlap) ** 2 / normv2 / normw2
+		else:
+			obs_error.append(obs[i])
+	if len(obs_error) > 0 and not obs_error_issued:
+		sys.stderr.write("ERROR (observables): Observables %s could not be calculated.\n" % (", ".join(obs_error)))
+		obs_error_issued = True
+	return obsvals
+
+def regularize_observable(eival1, eival2, obsval1, obsval2):
+	""""Regularize" observable values
+	If the observable value suddenly jumps, 'cross over' the eigenvalues and
+	observable values if this seems more plausible froma physical perspective.
+	The algorithm uses successive linear extrapolation to predict the next value
+	of the observable and then selects the actual value that lies closest to it.
+
+	Note:
+	Originally, this function was designed for the Berry curvature and
+	generalized later.
+
+	Arguments:
+	eival1, eival2    One-dimensional arrays. Eigenvalues (as function of
+	                  momentum, for example).
+	obsval1, obsval2  One-dimensional arrays. Observable values (as function of
+	                  momentum, for example).
+
+	Returns:
+	eival1new, eival2new    One-dimensional arrays with 'crossed-over'
+	                        eigenvalues.
+	obsval1new, obsval2new  One-dimensional arrays with 'crossed-over'
+	                        observable values.
+	"""
+	if len(eival1) != len(eival2) or len(obsval1) != len(obsval2) or len(eival1) != len(obsval1):
+		raise ValueError("All inputs must have the same length")
+
+	l = len(obsval1)
+	if l <= 2:
+		return eival1, eival2, obsval1, obsval2
+
+	eival1new = [eival1[0], eival1[1]]
+	eival2new = [eival2[0], eival2[1]]
+	obsval1new = [obsval1[0], obsval1[1]]
+	obsval2new = [obsval2[0], obsval2[1]]
+
+	for j in range(2, l):
+		# predict new values
+		obsval1pre = 2 * obsval1new[-1] - obsval1new[-2]
+		obsval2pre = 2 * obsval2new[-1] - obsval2new[-2]
+		diff_11_22 = abs(obsval1pre - obsval1[j]) + abs(obsval2pre - obsval2[j])
+		diff_12_21 = abs(obsval1pre - obsval2[j]) + abs(obsval2pre - obsval1[j])
+		if diff_11_22 <= diff_12_21:
+			eival1new.append(eival1[j])
+			eival2new.append(eival2[j])
+			obsval1new.append(obsval1[j])
+			obsval2new.append(obsval2[j])
+		else:
+			eival1new.append(eival2[j])
+			eival2new.append(eival1[j])
+			obsval1new.append(obsval2[j])
+			obsval2new.append(obsval1[j])
+
+	if isinstance(eival1, np.ndarray):
+		return np.array(eival1new), np.array(eival2new), np.array(obsval1new), np.array(obsval2new)
+	else:
+		return eival1new, eival2new, obsval1new, obsval2new
+
+def get_all_obsids(kdim=0, ll=False, norb=8, opts=None):
+	"""Give all obsids for a given dimension and number of orbitals
+	These are the observables that should be calculated and those which end up
+	in the output files.
+
+	Arguments:
+	kdim    1, 2, or 3. The dimensionality (number of momentum directions).
+	ll      True or False. Whether or not a Landau level calculation.
+	norb    6 or 8. The number of orbitals in the model
+	opts    Dict or None. General options (from the command line).
+
+	Returns:
+	obsids  List of strings.
+	"""
+	if opts is None:
+		opts = {}
+	if kdim == 3 and not ll:  # bulk
+		obsids = ["jz", "jx", "jy", "sz", "sx", "sy", "split", "orbital", "gamma6",
+			"gamma8", "gamma8h", "gamma8l", "gamma7", "jz6", "jz8", "jz7", "isopz",
+			"hex", "hz"]
+	elif kdim == 2 and not ll:  # 2d
+		obsids = ["jz", "jx", "jy", "sz", "sx", "sy", "split", "orbital",
+			"gamma6", "gamma8", "gamma8h", "gamma8l", "gamma7", "jz6", "jz8", "jz7",
+			"z", "z2", "zif", "zif2", "well", "wellext", "interface", "interfacechar",
+			"interface10nm", "interfacechar10nm", "iprz", "pz", "isopz", "isopx",
+			"isopy", "isopzw", "isopzs", "hex", "hex1t", "hexinf", "hz", "hz1t"]
+	elif kdim == 1 and not ll:  # 1d
+		obsids = ["y", "y2", "yjz", "jz", "jx", "jy", "sz", "sx", "sy", "split",
+			"orbital", "gamma6", "gamma8", "gamma8h", "gamma8l", "gamma7", "jz6",
+			"jz8", "jz7", "z", "z2", "zif", "zif2", "iprz", "ipry", "ipryz",
+			"pz", "isopz", "px", "isopx", "py", "isopy", "pzy", "isopzy", "hex",
+			"hz"]
+	elif kdim == 3 and ll:  # bulk-ll
+		obsids = ["jz", "jx", "jy", "sz", "sx", "sy", "split", "orbital",
+			"gamma6", "gamma8", "gamma8h", "gamma8l", "gamma7", "jz6", "jz8",
+			"jz7", "hex", "hz"]
+	elif kdim == 2 and ll:  # ll
+		obsids = ["jz", "jx", "jy", "sz", "sx", "sy", "split", "orbital",
+			"gamma6", "gamma8", "gamma8h", "gamma8l", "gamma7", "jz6",
+			"jz8", "jz7", "z", "z2", "zif", "zif2", "well", "wellext",
+			"interface", "interfacechar", "interface10nm", "interfacechar10nm",
+			"iprz", "pz", "isopz", "hex", "hz"]
+	else:
+		raise ValueError("Invalid combination of arguments kdim and ll")
+	if norb == 6:
+		obsids = [oi for oi in obsids if not oi.endswith('7')]
+
+	# Orbital-specific observables
+	# TODO: Can the condition be relaxed?
+	if opts.get('orbitalobs') and kdim in [1, 2] and not ll:
+		obsids.extend(['orbital[%i]' % (j + 1) for j in range(0, norb)])
+
+	# Custom interface length
+	# TODO: Can the condition be relaxed?
+	if opts.get('custom_interface_length') is not None and kdim in [1, 2] and not ll:
+		obsids.extend(["custominterface[%i]" % opts['custom_interface_length'],
+		               "custominterfacechar[%i]" % opts['custom_interface_length']])
+
+	return obsids
+
+def plotobs_apply_llmode(plotopts, ll_mode = None):
+	"""Set plot observable automatically based on LL mode
+
+	Arguments:
+	plotopts  Dict instance with plot options. Note: The instance may be
+	          modified if ll_mode is set.
+	ll_mode   String. The LL mode.
+
+	Returns:
+	plotobs   String or None. The plot observable.
+	"""
+	if plotopts.get('obs') is None:
+		return None
+	elif ll_mode is None:
+		return plotopts['obs']
+	if '.' in plotopts['obs']:
+		obs_split = plotopts['obs'].split('.')
+		obs1, obs2 = obs_split[0], '.'.join(obs_split[1:])
+	else:
+		obs1, obs2 = plotopts['obs'], None
+	if ll_mode == 'full' and obs1 in ['llindex', 'll_n', 'lln']:
+		sys.stderr.write(f"Warning (plotobs_apply_llmode): Observable '{obs1}' cannot be used in 'full' LL mode. Use observable 'llavg' instead.\n")
+		plotopts['obs'] = 'llavg' if obs2 is None else 'llavg' + '.' + obs2
+	if ll_mode != 'full' and obs1 in ['llavg', 'llmax', 'llbymax']:
+		sys.stderr.write(f"Warning (plotobs_apply_llmode): Observable '{obs1}' cannot be used in '{ll_mode}' LL mode. Use observable 'llindex' instead.\n")
+		plotopts['obs'] = 'llindex' if obs2 is None else 'llindex' + '.' + obs2
+	return plotopts['obs']
diff --git a/kdotpy-v1.0.0/src/kdotpy/parallel.py b/kdotpy-v1.0.0/src/kdotpy/parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..d262ecafb778b5c2ee01d520144c066b4e361d7b
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/parallel.py
@@ -0,0 +1,586 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from os import environ
+environ['OMP_NUM_THREADS'] = '1'
+import sys
+from platform import system
+import signal
+from time import sleep, time as rtime
+from datetime import timedelta, datetime as dtime
+import multiprocessing as mp
+import multiprocessing.dummy as th
+
## Job monitor
job_monitor_enabled = True

def set_job_monitor(state):
	"""Globally enable (True) or disable (False) the 'job monitor' output."""
	global job_monitor_enabled
	job_monitor_enabled = state
+
def show_job_monitor(s):
	"""Write message s to stderr, but only if the 'job monitor' is enabled."""
	# Note: reading the module-level flag does not require a global statement
	if not job_monitor_enabled:
		return
	sys.stderr.write("***  " + s + "\n")
	sys.stderr.flush()
+
def job_monitor_k_b(k, b):
	"""Formatting function for 'job monitor'.

	Format momentum k and magnetic field b for display. If one of the two
	values is zero, show the other one only; if both are nonzero, show them
	in the order "k b".

	Arguments:
	k   Number (or other value with a string representation). Momentum.
	b   Number (or other value with a string representation). Magnetic field.

	Returns:
	String.
	"""
	def fmt(x):
		# '%g' for plain numbers; fall back to str() for other types
		return ("%g" % x) if isinstance(x, (float, int)) else str(x)
	if b == 0.0:
		return fmt(k)  # also covers the case k == 0, b == 0
	if k == 0.0:
		return fmt(b)
	# Bug fix: the original assigned k_str from b and b_str from k, so the
	# combined output was "b k" instead of "k b".
	return "%s %s" % (fmt(k), fmt(b))
+
def display_etl(s):
	"""Format estimated time left (ETL)

	Argument:
	s   Time in seconds

	Returns:
	String, e.g. "  1d2h", " 3h20m", or " 5m08s", depending on magnitude.
	"""
	s = round(s)
	if s >= 86400:
		days, rem = divmod(s, 86400)
		return "%3id%ih" % (days, rem // 3600)
	if s >= 3600:
		hours, rem = divmod(s, 3600)
		return "%2ih%im" % (hours, rem // 60)
	minutes, seconds = divmod(s, 60)
	return "%2im%02is" % (minutes, seconds)
+
def display_etf(s):
	"""Format estimated time finished (ETF)

	Argument:
	s   Time in seconds from now.

	Returns:
	String with the clock time (and possibly weekday or date) at which the
	jobs are expected to finish. The amount of detail depends on how far in
	the future that moment lies.
	"""
	now = dtime.now()
	etf = now + timedelta(seconds = s)
	if s <= 600:
		fmt = "%H:%M:%S"
	elif etf.date() == now.date():
		fmt = "%H:%M"
	elif s <= 518400:  # 6 * 86400; up to six days ahead, show the weekday
		fmt = "%a %H:%M"
	else:
		fmt = "%Y-%m-%d"
	return etf.strftime(fmt)
+
class Progress:
	"""Container class for progress counter.

	This class shows a progress counter like 2 / 100 and calculates and prints
	the ETL or ETF. The calculation takes into account multiple threads.

	Attributes:
	tm0            Starting time (seconds, from time.time()).
	tm1            Time (seconds) at which the counter was last updated.
	string         Status string, e.g., 'Doing diagonalization...'
	jobs_done      Integer number of completed jobs (internal)
	n_jobs         Total number of jobs (set on init)
	n_threads      Number of threads. Default is 1.
	update_delay   Float. If nonzero, update counter every this many seconds.
	               If zero, always update if the counter has changed.
	always_update  True or False. If True, also update counter if the number of
	               completed jobs did not increase.
	no_output      True or False. If True, suppress all counter output.
	show_etf       True or False. If True, print ETF. If False, print ETL.
	tcompl         List of length n_jobs with the completion times of the jobs
	               relative to tm0 (None for jobs that have not completed yet).
	fancy_counter  True or False. Whether to use a fancy counter (that updates
	               on one location in the terminal). The normal style is to
	               print each update of the counter on a new line. This is set
	               automatically.
	"""
	def __init__(self, string, n_jobs, n_threads = 1, update_delay = 0.0, always_update = False, no_output = False):
		global job_monitor_enabled
		self.tm0 = rtime()
		self.tm1 = rtime()
		self.string = string.strip('\n')
		if not no_output:
			sys.stderr.write(self.string + '...\n')  # write string to sys.stderr
			sys.stderr.flush()
		self.jobs_done = 0
		self.n_jobs = n_jobs
		self.n_threads = n_threads
		self.update_delay = update_delay
		self.always_update = always_update
		self.no_output = no_output
		# ETF display is requested through command-line arguments
		self.show_etf = ("showetf" in sys.argv or "monitoretf" in sys.argv)
		self.tcompl = [None for _ in range(0, n_jobs)]
		# Use in-place counter updates only on a terminal, and only if the job
		# monitor does not interleave its own output lines
		self.fancy_counter = sys.stderr.isatty() and not job_monitor_enabled

	def show(self, jobsdone):
		"""Show progress and estimated time left/finished (ETL/ETF)

		Argument:
		jobsdone  Integer. Update the counter to this value.
		"""
		# Update only if there is progress (or always_update is set) and the
		# previous update is more than update_delay seconds ago.
		if not self.no_output and (self.always_update or jobsdone > self.jobs_done) and (rtime() - self.tm1 > self.update_delay):
			s = "%i / %i" % (jobsdone, self.n_jobs)
			if 1 <= jobsdone <= self.n_jobs:
				est_time_left = self.est_time_left(jobsdone)
				if est_time_left is not None and est_time_left > 0.0:
					if self.show_etf:
						s += " [ETF %s]" % display_etf(est_time_left)
					else:
						s += " [ETL %s]" % display_etl(est_time_left)
			# The fancy counter overwrites the current terminal line; the final
			# update (jobsdone == n_jobs) is terminated by a newline always.
			endline = '' if self.fancy_counter and jobsdone < self.n_jobs else '\n'
			clearline = '\r\x1b[K' if self.fancy_counter else ''  # ANSI escape: ESC [ K
			sys.stderr.write(clearline + s + endline)  # write string to sys.stderr
			sys.stderr.flush()
			self.jobs_done = jobsdone
			self.tm1 = rtime()

	def show_do(self, jobsdone, value):
		"""Show progress and return the value that has been input without change
		Typical usage: [Progress.show_do(j, f(x)) for j, x in enumerate(xvals)]
		Note that the value is calculated before it is passed to the function;
		hence the +1.
		"""
		self.show(jobsdone + 1)
		return value

	def est_time_left(self, jobsdone):
		"""Advanced calculation of ETL, that uses the times at which the previous jobs have completed
		This function takes into account multiple threads. Jobs are assumed to
		be distributed over the threads round-robin (job j on thread
		j % n_threads)."""
		t = rtime()
		# Record completion times for jobs finished since the previous call
		if jobsdone > self.jobs_done:
			for j in range(self.jobs_done, jobsdone):
				self.tcompl[j] = t - self.tm0

		# Per thread: time of latest completed job, number of completed jobs,
		# and total number of jobs assigned to it
		thr_time = [None for thr in range(0, self.n_threads)]
		thr_done = [   0 for thr in range(0, self.n_threads)]
		thr_total= [self.n_jobs // self.n_threads for thr in range(0, self.n_threads)]
		# The remainder jobs go to the first (n_jobs mod n_threads) threads
		for j in range(0, self.n_jobs % self.n_threads):
			thr_total[j] += 1
		for j in range(0, self.n_jobs):
			if self.tcompl[j] is None:
				break
			thr_time[j % self.n_threads] = self.tcompl[j]
			thr_done[j % self.n_threads] += 1

		if thr_time[0] is None:
			return None
		else:
			# Extrapolate each thread's elapsed time linearly to its total job
			# count; the overall ETL is set by the slowest thread (max), and
			# clipped at zero.
			thr_est = [thr_time[j] * thr_total[j] / thr_done[j] - (t - self.tm0) for j in range(0, self.n_threads) if thr_time[j] is not None]
			return max(max(thr_est), 0)
+
+### FUNCTIONS FOR PARALLELIZATION ###
+# Apply the specified functions over a list of values (vals), with extra
+# parameters (f_args) and keyword parameters (f_kwds). We fall back to a simple
+# evaluation if the number of processes is set to 1, or if the number of values
+# equals 1.
+
+## Signal handling
+## This exercise is necessary in order to deal properly with the signal events,
+## for example:
+##   KeyboardInterrupt   The main process should respond to KeyboardInterrupt,
+##                       but the worker process should ignore it. When this
+##                       event occurs, the main process should terminate all
+##                       worker processes. The program is allowed to continue
+##                       with incomplete data. (But usually will fail elsewhere
+##                       because of that.)
+##   SIGTERM, SIGABRT etc. to the main process
+##                       Here, before termination, we raise a custom exception
+##                       with the signal number. The main process catches the
+##                       exception, terminates the worker processes, and exits
+##                       with the appropriate exit code (signal number + 128).
+##                       This step is necessary, because if the main process
+##                       dies without terminating the worker processes, the
+##                       latter can continue working and/or end up as zombies.
+##   SIGTERM, etc. to a worker process
+##                       If a worker process dies, it emits a SIGCHLD signal
+##                       that is caught by the main process. The main process
+##                       then terminates the other worker processes and exits.
+##                       The signal handler for SIGCHLD has to be reset before
+##                       pool.terminate() is called, because terminating the
+##                       worker processes this way will also emit SIGCHLD
+##                       signals.
+## NOTE: On some systems, pool.terminate() will deadlock in some situations.
+## This appears to be an (unintended) bug in Python. The bug report
+## [https://bugs.python.org/issue29759] might be related.
+
def init_worker():
	"""Set up the signal handler(s) for a worker process.

	Workers keep the default SIGTERM behavior, ignore keyboard interrupts
	(those are handled by the main process), and ignore SIGCHLD where that
	signal exists.
	"""
	handlers = [(signal.SIGTERM, signal.SIG_DFL), (signal.SIGINT, signal.SIG_IGN)]
	if system() != 'Windows':  # SIGCHLD does not exist on Windows
		handlers.append((signal.SIGCHLD, signal.SIG_IGN))
	for sig, action in handlers:
		signal.signal(sig, action)
+
class TerminateSignal(Exception):
	"""Exception raised by the main-process signal handler.

	Attribute:
	signum   Integer or None. The number of the signal that was caught.
	"""
	def __init__(self, signum = None):
		# Pass signum to Exception.__init__ so that args is populated: this
		# makes str()/repr() informative and keeps signum intact when the
		# exception is pickled (relevant for multiprocessing).
		super().__init__(signum)
		self.signum = signum
+
class SignalHandler:
	"""Context for setting and resetting signal handler(s) in the main process"""
	def sig_handler(self, s, fr):
		# Translate the incoming signal into an exception, which is caught by
		# the code inside the context
		raise TerminateSignal(s)

	def __enter__(self):
		self.siglist = [signal.SIGTERM, signal.SIGABRT]
		if system() != 'Windows':
			# These signals do not exist on Windows
			self.siglist += [signal.SIGUSR1, signal.SIGUSR2, signal.SIGCHLD]
		for s in self.siglist:
			signal.signal(s, self.sig_handler)

	def __exit__(self, exc_type, exc_value, traceback):
		# Restore default handling for all signals set on entry
		for s in self.siglist:
			signal.signal(s, signal.SIG_DFL)
+
class NoOpSignalHandler:
	"""Context manager that leaves signal handling untouched."""
	def __enter__(self):
		return None

	def __exit__(self, exc_type, exc_value, traceback):
		return None
+
def signalstr(s):
	"""String to show when signal s is caught"""
	if s == signal.SIGTERM:
		return "Terminated"
	if s == signal.SIGINT:
		return "Interrupted"
	if s == signal.SIGABRT:
		return "Aborted"
	# SIGCHLD is accessed only if s matched none of the above, preserving the
	# original lazy evaluation (the attribute does not exist on Windows)
	if s == signal.SIGCHLD:
		return "Worker process died"
	return "Terminated with signal %i" % s
+
+
_long_time = 10000000  # Timeout (s) for pool.apply_async.get(); effectively infinite (~115 days) -- presumably a finite timeout keeps .get() interruptible, TODO confirm
+
+##
def parallel_apply(
	f, vals, f_args = None, f_kwds = None, num_processes = 1,
	poll_interval = 1., description = "", showstatus = True, threads = False,
	propagate_interrupt = False):
	"""Iteratively apply a function. Uses either a pool of worker processes or multithreading
	Equivalent to: [f(x, *f_args, **f_kwds) for x in vals]

	Arguments:
	f              Function.
	vals           List of values to iterate over.
	f_args         Tuple of extra arguments.
	f_kwds         Dict with extra keyword arguments
	num_processes  Integer. Number of worker processes (or threads).
	poll_interval  Float. Interval in seconds to test how many jobs have been
	               completed.
	description    String. Status message.
	showstatus     True or False. Whether to show the progress counter.
	threads        True or False. If True, use multithreading
	               (multiprocessing.dummy); if False, worker processes.
	propagate_interrupt  True or False. Whether to propagate (re-raise) a
	                     KeyboardInterrupt event.

	Returns:
	List of function return values.
	"""
	## Default values (f_args and f_kwds)
	if f_args is None:
		f_args = ()
	elif not isinstance(f_args, tuple):
		raise TypeError("Argument f_args must be a tuple or None")
	if f_kwds is None:
		f_kwds = {}
	elif not isinstance(f_kwds, dict):
		raise TypeError("Argument f_kwds must be a dict or None")

	n = len(vals)
	progress = Progress('Calculating' if description == '' else description, n, n_threads = num_processes, no_output= not showstatus)
	if n > 1 and num_processes > 1:
		with SignalHandler():
			if threads:
				pool = th.Pool(processes=num_processes)
			else:
				pool = mp.Pool(processes = num_processes, initializer = init_worker)
			output = [pool.apply_async(f, args=(x,) + f_args, kwds = f_kwds) for x in vals]

			try:
				# Poll until all jobs have completed, updating the counter
				while True:
					jobsdone = sum(1 for x in output if x.ready())
					if jobsdone >= n:
						break
					progress.show(jobsdone)
					sleep(poll_interval)
			except TerminateSignal as ex:
				# A termination signal was caught: stop workers and exit.
				# Reset SIGCHLD first, because pool.terminate() also makes the
				# dying workers emit SIGCHLD signals.
				sys.stderr.write("\nERROR (parallel_apply): %s.\n" % signalstr(ex.signum))
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				pool.terminate()
				pool.join()
				sleep(1)
				sys.stderr.write("EXIT %i\n" % (128 + ex.signum))
				# NOTE(review): 'exit' relies on the site module; sys.exit would be more robust
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				# Keyboard interrupt: keep the results of jobs completed so
				# far; continue unless propagate_interrupt is set.
				# NOTE(review): jobsdone may be unbound if the interrupt
				# arrives before the first poll iteration -- confirm
				sys.stderr.write("\nERROR (parallel_apply): Interrupt.\n")
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.always_update = True
				progress.show(jobsdone)
				data = [r.get(_long_time) for r in output if r.ready()]
				pool.terminate()
				pool.join()
				if propagate_interrupt:
					raise
			else:
				# Normal completion: collect all results
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.show(n)
				data = [r.get() for r in output]
				pool.close()
				pool.join()
	else:
		# Serial fallback (single process requested or single value)
		progress.show(0)
		data = []
		with SignalHandler():
			if system() != 'Windows':
				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
			try:
				for j, x in enumerate(vals):
					data.append(progress.show_do(j, f(x, *f_args, **f_kwds)))
			except TerminateSignal as ex:
				sys.stderr.write("\nERROR (parallel_apply): %s.\n" % signalstr(ex.signum))
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				sys.stderr.write("\nERROR (parallel_apply): Interrupt.\n")
				if propagate_interrupt:
					raise
			else:
				progress.show(n)
	return data
+
def dict_plus_array_dict(d_one, d_list, j):
	"""Combine a dict of scalars with the j-th elements of a dict of lists.

	Arguments:
	d_one   Dict. Keyword arguments common to all iterations.
	d_list  Dict of lists (or other indexable sequences). For each key, the
	        value d_list[key][j] is taken for iteration j.
	j       Integer. The iteration index.

	Returns:
	Dict with the combined key-value pairs. If d_list is empty, d_one itself
	is returned (not a copy), matching the original fast path.

	Raises:
	KeyError, if a key appears in both d_one and d_list.
	"""
	if not d_list:
		return d_one
	out = dict(d_one)
	for k, values in d_list.items():
		if k in out:
			# Name the key for easier debugging
			raise KeyError("Duplicate key '%s'" % k)
		out[k] = values[j]
	return out
+
def parallel_apply_enumerate(
	f, vals, f_args = None, f_kwds = None, fj_kwds = None, num_processes = 1,
	poll_interval = 1., description = "", redefine_signals = True):
	"""Iteratively apply the function f with indices of the list passed through.
	Equivalent to:
	[f(j, x, *f_args, **dict_plus_array_dict(f_kwds, fj_kwds, j)) for j, x in enumerate(vals)]

	Arguments:
	f                 Function.
	vals              List of values to iterate over.
	f_args            Tuple of extra arguments.
	f_kwds            Dict with extra keyword arguments
	fj_kwds           Dict with lists. For each element, key = fj_kwds[key][j]
	                  is passed to the function f for iteration j.
	num_processes     Integer. Number of worker processes.
	poll_interval     Float. Interval in seconds to test how many jobs have been
	                  completed.
	description       String. Status message.
	redefine_signals  True or False. If True, redefine signal handling (within
	                  calculation scripts). If False, use standard signal
	                  handling (e.g., with kdotpy batch). Default: True

	Returns:
	List of function return values.
	"""
	## Default values (f_args, f_kwds, and fj_kwds)
	if f_args is None:
		f_args = ()
	elif not isinstance(f_args, tuple):
		raise TypeError("Argument f_args must be a tuple or None")
	if f_kwds is None:
		f_kwds = {}
	elif not isinstance(f_kwds, dict):
		raise TypeError("Argument f_kwds must be a dict or None")
	if fj_kwds is None:
		fj_kwds = {}
	elif not isinstance(fj_kwds, dict):
		raise TypeError("Argument fj_kwds must be a dict or None")

	n = len(vals)
	progress = Progress('Calculating' if description == '' else description, n, n_threads = num_processes)
	handlercontext = SignalHandler if redefine_signals else NoOpSignalHandler
	initializer = init_worker if redefine_signals else None
	if n > 1 and num_processes > 1:
		with handlercontext():
			pool = mp.Pool(processes = num_processes, initializer = initializer)
			output = [pool.apply_async(f, args=(j, x) + f_args, kwds = dict_plus_array_dict(f_kwds, fj_kwds, j)) for j, x in enumerate(vals)]
			try:
				# Poll until all jobs have completed, updating the counter
				while True:
					jobsdone = sum(1 for x in output if x.ready())
					if jobsdone >= n:
						break
					progress.show(jobsdone)
					sleep(poll_interval)
			except TerminateSignal as ex:
				# Reset SIGCHLD first, because pool.terminate() also makes the
				# dying workers emit SIGCHLD signals.
				sys.stderr.write("\nERROR (parallel_apply_enumerate): %s.\n" % signalstr(ex.signum))
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				pool.terminate()
				pool.join()
				sleep(0.2)
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				# Keyboard interrupt: keep the results of jobs completed so far
				sys.stderr.write("\nERROR (parallel_apply_enumerate): Interrupt.\n")
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.always_update = True
				progress.show(jobsdone)
				data = [r.get(_long_time) for r in output if r.ready()]
				pool.terminate()
				pool.join()
			else:
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.show(n)
				data = [r.get() for r in output]
				pool.close()
				pool.join()
	else:
		# Serial fallback (single process requested or single value)
		progress.show(0)
		data = []
		with handlercontext():
			if system() != 'Windows':
				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
			try:
				for j, x in enumerate(vals):
					# Bug fix: also merge the per-iteration keywords fj_kwds
					# here; previously the serial path silently ignored them,
					# unlike the parallel path above.
					data.append(progress.show_do(j, f(j, x, *f_args, **dict_plus_array_dict(f_kwds, fj_kwds, j))))
			except TerminateSignal as ex:
				sys.stderr.write("\nERROR (parallel_apply_enumerate): %s.\n" % signalstr(ex.signum))
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				sys.stderr.write("\nERROR (parallel_apply_enumerate): Interrupt.\n")
			else:
				progress.show(n)
	return data
+
def parallel_apply_expand(f, vals, f_args = None, f_kwds = None, num_processes = 1, poll_interval = 1., description = ""):
	"""Apply the function f to a list of tuples, where the tuples are expanded upon calling the function.
	Equivalent to: [f(*(x + f_args), **f_kwds) for x in vals]

	Arguments:
	f              Function.
	vals           List of tuples to iterate over. Each element must be a
	               tuple; its members are passed as positional arguments.
	f_args         Tuple of extra arguments.
	f_kwds         Dict with extra keyword arguments
	num_processes  Integer. Number of worker processes.
	poll_interval  Float. Interval in seconds to test how many jobs have been
	               completed.
	description    String. Status message.

	Returns:
	List of function return values.
	"""
	## Default values (f_args and f_kwds)
	if f_args is None:
		f_args = ()
	elif not isinstance(f_args, tuple):
		raise TypeError("Argument f_args must be a tuple or None")
	if f_kwds is None:
		f_kwds = {}
	elif not isinstance(f_kwds, dict):
		raise TypeError("Argument f_kwds must be a dict or None")

	n = len(vals)
	progress = Progress('Calculating' if description == '' else description, n, n_threads = num_processes)
	# Validate the input before starting any workers
	for x in vals:
		if not isinstance(x, tuple):
			raise ValueError("Elements of vals must be tuples")
	if n > 1 and num_processes > 1:
		with SignalHandler():
			pool = mp.Pool(processes = num_processes, initializer = init_worker)
			output = [pool.apply_async(f, args=x + f_args, kwds = f_kwds) for x in vals]

			try:
				# Poll until all jobs have completed, updating the counter
				while True:
					jobsdone = sum(1 for x in output if x.ready())
					if jobsdone >= n:
						break
					progress.show(jobsdone)
					sleep(poll_interval)
			except TerminateSignal as ex:
				# Reset SIGCHLD first, because pool.terminate() also makes the
				# dying workers emit SIGCHLD signals.
				sys.stderr.write("\nERROR (parallel_apply_expand): %s.\n" % signalstr(ex.signum))
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				pool.terminate()
				pool.join()
				sleep(0.2)
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				# Keyboard interrupt: keep the results of jobs completed so far
				sys.stderr.write("\nERROR (parallel_apply_expand): Interrupt.\n")
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.always_update = True
				progress.show(jobsdone)
				data = [r.get(_long_time) for r in output if r.ready()]
				pool.terminate()
				pool.join()
			else:
				if system() != 'Windows':
					signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				progress.show(n)
				data = [r.get() for r in output]
				pool.close()
				pool.join()
	else:
		# Serial fallback (single process requested or single value)
		progress.show(0)
		data = []
		with SignalHandler():
			if system() != 'Windows':
				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
			try:
				for j, x in enumerate(vals):
					data.append(progress.show_do(j, f(*(x + f_args), **f_kwds)))
			except TerminateSignal as ex:
				sys.stderr.write("\nERROR (parallel_apply_expand): %s.\n" % signalstr(ex.signum))
				exit(128 + ex.signum)
			except KeyboardInterrupt:
				sys.stderr.write("\nERROR (parallel_apply_expand): Interrupt.\n")
			else:
				progress.show(n)
	return data
diff --git a/kdotpy-v1.0.0/src/kdotpy/physconst.py b/kdotpy-v1.0.0/src/kdotpy/physconst.py
new file mode 100644
index 0000000000000000000000000000000000000000..620af86034f2d2b400136a78cc4bbb893998b3c5
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/physconst.py
@@ -0,0 +1,70 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
### PHYSICAL CONSTANTS ###
#
# units:
# length:      nm
# energy:      meV  (CAUTION: not eV!)
# voltage:     mV   (CAUTION: not V!)
# time:        ns
# temperature: K
# magn. field: T
# charge:      e    (elementary charge; e > 0)

m_e = 0.510998910e9                     # meV (electron mass in energy equivalents; E = m c^2)
e_el = 1.6021766208e-19                 # C (elementary charge in Coulomb)
cLight = 299792458.                     # nm / ns (speed of light)
hbar = 6.582119514e-4                   # meV ns (reduced Planck constant)
hbarm0 = hbar**2 * cLight**2 / m_e / 2  # ~ 38 meV nm^2 (hbar^2 / 2 m_e)
eoverhbar = 1e-6 / hbar                 # 1 / (T nm^2) -- see note below
muB = 5.7883818012e-2                   # meV / T (Bohr magneton)
kB = 8.6173303e-2                       # meV / K (Boltzmann constant)
eovereps0 = 1.80951280207e4             # mV nm (e / epsilon_0)
gg = 2                                  # gyromagnetic ratio (dimensionless)
r_vonklitzing = 25812.8074555           # ohm (von Klitzing constant h / e^2)

# Note on eoverhbar:
# The factor 1e-6 is included such that:
# eoverhbar * A (where A is vector potential in units of T nm) has a resulting
#   unit of nm^-1, as appropriate for a momentum quantity
# eoverhbar * b (where b is magnetic field / flux density in T) has a resulting
#   unit of nm^-2, so that multiplication by an area in nm^2 yields a
#   dimensionless quantity.
diff --git a/kdotpy-v1.0.0/src/kdotpy/physparams.py b/kdotpy-v1.0.0/src/kdotpy/physparams.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b459ef485cab11722014de66f8b481f904b6610
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/physparams.py
@@ -0,0 +1,724 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from math import sqrt, ceil, tanh, pi
+import sys
+import numpy as np
+
+from .physconst import kB, muB, eoverhbar
+from .materials import Material
+from .layerstack import LayerStack, default_layer_names
+from .latticetrans import lattice_transform
+from .strain import lattice_const_from_strain, strain_epsilondiag, strain_automatic
+
+### GENERAL FUNCTIONS
+
def material_param(layer_material, substrate_material, a_lattice = None, strain = None, angle = 0.0, hide_strain_warning = False):
	"""Calculate and store derived material parameters

	Arguments:
	layer_material       Material instance
	substrate_material   Material instance or None.
	a_lattice            Number or None. The lattice constant of the strained
	                     material.
	angle                Number. For a strip in a non-trivial orientation, the
	                     angle between the longitudinal direction and the
	                     crystal direction a.
	hide_strain_warning  True or False. If True, hide the warning issued when
	                     lattice constant and substrate material are both given
	                     explicitly.

	Return:
	A dict instance with the parameters of the layer material, appropriately
	amended with the strain(ed) values.
	"""
	if not isinstance(layer_material, Material):
		raise ValueError("layer_material must be a Material instance")
	if substrate_material is not None and not isinstance(substrate_material, Material):
		raise ValueError("substrate_material must be None or a Material instance")

	mparam = layer_material.param.copy()
	mparam['material'] = layer_material
	# Store the free (unstrained) lattice constant under a separate key and
	# drop the original 'a' entry
	mparam['aFree'] = 1. * layer_material['a']
	mparam.pop('a', None)

	# NOTE(review): the argument 'angle' is currently unused here -- confirm
	epsdiag = strain_epsilondiag(layer_material, substrate_material, strain = strain, a_lattice = a_lattice, hide_strain_warning = hide_strain_warning)
	mparam['epsilonxx'], mparam['epsilonyy'], mparam['epsilonzz'] = epsdiag
	# For now, no off-diagonal strain components
	mparam['epsilonyz'] = mparam['epsilonxz'] = mparam['epsilonxy'] = 0.0
	mparam['epsilon_par'] = (mparam['epsilonxx'] + mparam['epsilonyy']) / 2
	return mparam
+
+
+
+
+### EXCHANGE COUPLING ###
+
def brillouin52(x):
	"""Brillouin function B_{5/2} with an approximation near x = 0.
	The approximation is better than the numerical noise (~1e-11) for |x| < 1e-5
	A series expansion would be:
	  B_{5/2}(x) ~ (7/15)x - (259/5625)x^3 + (2666/421875)x^5 - (47989/52734375)x^7 + ...
	The radius of convergence of this expansion is R = (5/6) pi ~ 2.6.
	"""
	if abs(x) < 1e-5:
		# Linear approximation; avoids loss of precision near x = 0
		return x * 7 / 15
	return (6 / 5) / tanh(x * 6 / 5) - (1 / 5) / tanh(x * 1 / 5)
+
def Aexchange(magn, temperature, g=0.0, TK0=0.0):
	"""Aexchange / nbeta as function of magnetic field and temperature"""
	if g == 0.0:
		return 0.0
	prefactor = (-1 / 6) * (-5 / 2)
	if isinstance(magn, (float, np.floating, int, np.integer)):
		# Scalar field value
		return prefactor * brillouin52((5 / 2) * g * muB * magn / kB / (temperature + TK0))
	if isinstance(magn, tuple) and len(magn) == 3:
		# Vector field value: scale by the magnitude, return componentwise
		bb = np.sqrt(magn[0]**2 + magn[1]**2 + magn[2]**2)
		if bb == 0.0:
			return 0.0, 0.0, 0.0
		Aexabs = prefactor * brillouin52((5 / 2) * g * muB * bb / kB / (temperature + TK0))
		return tuple(Aexabs * component / bb for component in magn)
	raise TypeError("Input must be float or 3-tuple")
+
+### PhysParams CLASS ###
+
class PhysParams:
	"""Container class for physical parameters.
	The parameters may be returned as a function of z.

	Attributes (arguments):
	kdim
	norbitals
	zres
	yres
	linterface
	ly_width (width)
	yconfinement
	strain_direction
	strip_angle (strip_direction)
	magn
	temperature
	substrate_material
	a_lattice
	- (rel_strain)
	- (strain_angle)
	- (layer_types)
	layer_material (m_layers)
	layer_stack
	cache_param
	cache_z
	lz_thick
	nz
	zInterface
	nlayer
	c_dz, c_dz2
	c_dy, c_dy2
	ny
	ymid
	ninterface
	dzinterface
	"""
	def __init__(
		self, kdim = None, l_layers = None, m_layers = None, layer_types = None,
		layer_density = None, zres = None, linterface = None, width = None,
		yres = None, magn = None, temperature = None, yconfinement = None,
		substrate_material = None,	strain_direction = None, a_lattice = None,
		rel_strain = None, norbitals = None, lattice_orientation = None,
		matdef_renorm = True, hide_yconfinement_warning = False,
		hide_strain_warning = False):
		"""Validate the input arguments, fill in defaults, and derive quantities.

		Invalid inputs write an error message to stderr and terminate the
		program via exit(1) rather than raising an exception.
		"""
		# Default values (l_layers, m_layers)
		if l_layers is None:
			l_layers = []
		if m_layers is None:
			m_layers = []

		# Number of k dimensions
		if kdim in [1, 2, 3]:
			self.kdim = kdim
		else:
			sys.stderr.write("ERROR: The number of momentum dimensions must be 1, 2, or 3.\n")
			exit(1)

		# Number of orbitals
		if norbitals is None:
			self.norbitals = 6
		elif norbitals in [6, 8]:
			self.norbitals = norbitals
		else:
			sys.stderr.write("ERROR: The number of orbitals must be either 6 or 8.\n")
			exit(1)

		# Resolution (discretization of the derivatives)
		if zres is None and kdim <= 2:
			sys.stderr.write("ERROR: Resolution zres is required explicitly for 1D and 2D.\n")
			exit(1)
		elif zres is None:
			zres = 0.25  # resolution in z direction -- default value for kdim >= 3
		if zres <= 0.0:
			sys.stderr.write("ERROR: Resolution zres must be positive\n")
			exit(1)
		self.zres = zres

		if yres is None and kdim <= 1:
			sys.stderr.write("ERROR: Resolution yres is required explicitly for 1D.\n")
			exit(1)
		elif yres is None:
			yres = 0.25  # resolution in y direction -- default value for kdim >= 2
		if yres <= 0.0:
			sys.stderr.write("ERROR: Resolution yres must be positive\n")
			exit(1)
		self.yres = yres

		# Interface thickness
		if linterface is None:
			linterface = 0.075  # nm -- default value
		if linterface <= 0.0 or linterface > 10.0:
			sys.stderr.write("ERROR: Interface thickness out of range\n")
			exit(1)
		self.linterface = linterface

		# Width (y dimension) of the sample
		if width is None and kdim <= 1:
			sys.stderr.write("ERROR: Sample width is required explicitly for 1D.\n")
			exit(1)
		elif width is None:
			width = 1.0    # width -- default value for kdim >= 2
		if width < 0.0:
			sys.stderr.write("ERROR: Sample width must be positive\n")
			exit(1)
		self.ly_width = width

		# Confinement potential in y direction
		if yconfinement is None:
			yconfinement = 1e5
		if self.kdim >= 2:
			self.yconfinement = 0
		elif yconfinement < 0:
			sys.stderr.write("ERROR: Confinement in y direction should not be negative.\n")
			exit(1)
		elif yconfinement == 0:
			if not hide_yconfinement_warning:
				sys.stderr.write("Warning: No confinement in y direction is not recommended. Choose a value >= 50000 meV.\n")
		elif yconfinement <= 1000:
			if not hide_yconfinement_warning:
				sys.stderr.write("Warning: Confinement in y direction < 50000 meV can lead to strange results. Did you mean %s meV?\n" % (1000 * yconfinement))
		elif yconfinement < 5e4:
			if not hide_yconfinement_warning:
				sys.stderr.write("Warning: Confinement in y direction < 50000 meV can lead to strange results.\n")
		elif yconfinement > 1e6:
			sys.stderr.write("ERROR: Confinement in y direction exceeds maximum 10^6 meV.\n")
			exit(1)
		# NOTE(review): this assignment unconditionally overwrites the value
		# self.yconfinement = 0 that was set in the kdim >= 2 branch above;
		# confirm whether that branch is intended to take effect.
		self.yconfinement = yconfinement

		# Strain
		if strain_direction is not None:
			sys.stderr.write("Warning: Argument strain_direction is deprecated, and is ignored. In order to replicate the behaviour for strain axis other than z, use 'strain' with the appropriate numerical inputs.\n")
		if isinstance(rel_strain, tuple) and len(rel_strain) == 3:
			rel_strain = strain_automatic(rel_strain)

		# Orientation
		self.lattice_orientation = None
		self.lattice_trans = None
		if isinstance(lattice_orientation, (int, np.integer, float, np.floating)):
			self.lattice_orientation = lattice_orientation
			self.lattice_trans = lattice_orientation
		elif isinstance(lattice_orientation, tuple) and len(lattice_orientation) == 3 and all([isinstance(x, int) for x in lattice_orientation]):
			if lattice_orientation[2] != 0:
				sys.stderr.write("ERROR: Third component of the strip direction must be 0.\n")
				exit(1)
			if lattice_orientation[0] == 0 and lattice_orientation[1] == 0:
				sys.stderr.write("ERROR: Strip direction must not be (0,0,0).\n")
				exit(1)
			# NOTE(review): the 3-tuple is wrapped in a one-element list here,
			# while lattice_transformed_by_angle() below tests the first list
			# element for being numeric; confirm these two sites agree.
			self.lattice_orientation = [lattice_orientation]
			self.lattice_trans = np.arctan2(lattice_orientation[1], lattice_orientation[0]) * 180 / np.pi
		else:
			try:
				self.lattice_trans = lattice_transform(lattice_orientation)
			except:
				sys.stderr.write("ERROR: Not a valid lattice transformation.\n")
				raise
			self.lattice_orientation = lattice_orientation
		if isinstance(self.lattice_trans, (int, np.integer, float, np.floating)) and np.abs(self.lattice_trans) > 1e-6 and kdim != 1:
			sys.stderr.write("Warning: Strip direction is irrelevant for momentum dimension %i.\n" % kdim)
			self.lattice_trans = None
		if 'verbose' in sys.argv:
			print("Lattice transformation:")
			print(self.lattice_orientation)
			print(self.lattice_trans)

		### EXTERNAL ENVIRONMENT
		if magn is None:
			magn = 0.0    # Magnetic field in T -- default value
		self.magn = magn

		if temperature is None:
			temperature = 0.0   # Temperature in K -- default value
		if temperature < 0.0:
			sys.stderr.write("ERROR: Temperature must be positive\n")
			exit(1)
		self.temperature = temperature

		## LAYER STACK, MATERIAL PARAMETERS ##

		# Layer types/names
		if layer_types is None:
			lnames = None
		elif isinstance(layer_types, str):
			lnames1 = []
			for l in layer_types.lower():
				if l not in default_layer_names:
					sys.stderr.write("ERROR: Invalid layer type '%s'.\n" % l)
					exit(1)
				lnames1.append(default_layer_names[l])
			lnames = []
			# Disambiguate repeated layer names by appending a running counter,
			# e.g. two 'barrier' entries become 'barrier1' and 'barrier2'.
			for j, l in enumerate(lnames1):
				if lnames1.count(l) == 1:
					lnames.append(l)
				else:
					c = lnames1[:j].count(l) + 1
					lnames.append(l + ("%i" % c))
		else:  # TODO: list
			raise TypeError("Argument layer_types must be a string or None.")
		if lnames is not None and len(lnames) != len(m_layers):
			sys.stderr.write("ERROR: List of layer names has incorrect length.\n")
			exit(1)

		# Lattice parameter (set by substrate)
		self.substrate_material = substrate_material
		ref_layer_index = None
		if rel_strain == 'none':
			if a_lattice is not None:
				sys.stderr.write("Warning: Strain is ignored, so 'a_lattice' does not have an effect.\n")
			a_lattice = None
			self.a_lattice = 0.65
		elif a_lattice is None and rel_strain is None:
			if substrate_material is None:
				sys.stderr.write("ERROR: For determination of strain, one of the following three arguments is required:\n\'msubst\' (substrate material), \'a_lattice\' (lattice constant), or \'strain\' (relative strain).\n")
				exit(1)
			else:
				self.a_lattice = self.substrate_material['a']
		elif a_lattice is not None and rel_strain is None:
			self.a_lattice = a_lattice
		elif a_lattice is None and rel_strain is not None:
			# The reference material is the well layer:
			# second layer if 2 or 3 layers, first if 1 layer, otherwise raise an error
			if lnames is not None:
				if 'well' in lnames:
					ref_layer_index = lnames.index('well')
				else:
					sys.stderr.write("ERROR: Layer names are given, but the 'well' could not be identified uniquely.\n")  # Second error message will follow below
			elif len(m_layers) <= 3:
				ref_layer_index = 0 if len(m_layers) == 1 else 1
			if ref_layer_index is None:
				sys.stderr.write("ERROR: Cannot determine the well layer for calculation of lattice constant from relative strain.\nPlease input strain using \'a_lattice\' or \'msubst\'.\n")
				exit(1)
			m_ref = m_layers[ref_layer_index]
			a_lattice = lattice_const_from_strain(rel_strain, m_ref)
			self.a_lattice = a_lattice
		else:
			sys.stderr.write("Warning: Relative strain is ignored if lattice constant is given.\n")
			self.a_lattice = a_lattice

		# Material parameters
		# The strain angle equals the in-plane transformation angle only for
		# kdim == 1 with a nonzero lattice_trans; otherwise it is zero.
		strain_angle = self.lattice_trans if kdim == 1 and isinstance(self.lattice_trans, (int, np.integer, float, np.floating)) and np.abs(self.lattice_trans) > 1e-6 else 0.0
		m_param = []
		for j, mat in enumerate(m_layers):
			# Relative strain applies to the reference (well) layer only,
			# unless strain is disabled altogether ('none').
			strain_arg = rel_strain if j == ref_layer_index or rel_strain == 'none' else None
			m_param.append(material_param(mat, self.substrate_material, a_lattice = a_lattice, strain = strain_arg, angle = strain_angle, hide_strain_warning = hide_strain_warning))
		self.layer_material = m_layers  # this is not stored in the LayerStack instance, so save it here

		# Layer data
		self.layerstack = LayerStack(tuple(m_param), l_layers, zres = self.zres, names = lnames)
		if matdef_renorm:
			self.layerstack.renormalize_to(norbitals)
		elif norbitals != self.layerstack.matdef_orbitals:
			sys.stderr.write("Warning: Using parameters for %i-orbital model in %i-orbital model without renormalization.\n" % (self.layerstack.matdef_orbitals, norbitals))
		self.cache_param = None
		self.cache_z = None
		if layer_density is not None and layer_density != []:
			self.layerstack.set_density(layer_density)

		# Geometry (z dimension)
		self.lz_thick = self.layerstack.lz_thick      # Total thickness (nm)
		self.nz = self.layerstack.nz                  # Lattice points
		self.zinterface = self.layerstack.zinterface  # Interfaces (z coordinates in lattice points
		self.nlayer = self.layerstack.nlayer          # Number of layers

		## OTHER DERIVED QUANTITIES ##

		## Coefficients of discretisation of derivatives
		self.c_dz = -1.j / (2 * self.zres)
		self.c_dz2 = -1. / (self.zres**2)
		self.c_dy = -1.j / (2 * self.yres)
		self.c_dy2 = -1. / (self.yres**2)

		# Lattice points (y dimension)
		self.ny = int(ceil(self.ly_width / self.yres - 1e-10))  # small offset to avoid rounding errors

		# Center in y dimension
		self.ymid = (self.ny - 1) / 2.

		# Interface (width)
		self.ninterface = int(ceil(self.linterface / self.zres)) + 1
		self.dzinterface = self.linterface / self.zres

		# Exchange coupling
		self.has_exchange = self.layerstack.has_exchange()

	def to_dict(self, material_format = 'sub'):
		"""Return a dict composed of the class's attributes."""
		# Several keys are aliases that map to the same value, so that the
		# parameters can be looked up under different names.
		paramdict = {
			'norbitals': self.norbitals,
			'norb': self.norbitals,
			'zres': self.zres,
			'yres': self.yres,
			'linterface': self.linterface,
			'zinterface': self.zinterface,
			'ninterface': self.ninterface,
			'nzinterface': self.ninterface,
			'dzinterface': self.dzinterface,
			'yconfinement': self.yconfinement,
			'a': self.a_lattice,
			'b': self.magn,
			'magn': self.magn,
			't': self.temperature,
			'temp': self.temperature,
			'l': self.lz_thick,
			'd': self.lz_thick,
			'thickness': self.lz_thick,
			'w': self.ly_width,
			'width': self.ly_width,
			'ny': self.ny,
			'nz': self.nz,
			'nlayer': self.nlayer,
			'ymid': self.ymid,
		}
		if isinstance(self.substrate_material, Material):
			paramdict['msubst'] = self.substrate_material.format(fmt = material_format)
		elif isinstance(self.substrate_material, str):
			paramdict['msubst'] = self.substrate_material
		# Layerstack variables:
		for i in range(0, self.layerstack.nlayer):
			paramdict['layername(%i)' % (i+1)] = self.layerstack.names[i]
			paramdict['lname(%i)' % (i+1)] = self.layerstack.names[i]
			paramdict['layernz(%i)' % (i+1)] = self.layerstack.thicknesses_n[i]
			paramdict['nzlayer(%i)' % (i+1)] = self.layerstack.thicknesses_n[i]
			paramdict['layerl(%i)' % (i+1)] = self.layerstack.thicknesses_z[i]
			paramdict['llayer(%i)' % (i+1)] = self.layerstack.thicknesses_z[i]
			paramdict['dlayer(%i)' % (i+1)] = self.layerstack.thicknesses_z[i]
			paramdict['layermater(%i)' % (i+1)] = self.layer_material[i].format(fmt = material_format)
			paramdict['mlayer(%i)' % (i+1)] = self.layer_material[i].format(fmt = material_format)
			paramdict['nzminlayer(%i)' % (i+1)] = self.layerstack.zinterface[i]
			paramdict['nzmaxlayer(%i)' % (i+1)] = self.layerstack.zinterface[i + 1]
			paramdict['zminlayer(%i)' % (i+1)] = self.layerstack.zinterface_nm[i]
			paramdict['zmaxlayer(%i)' % (i+1)] = self.layerstack.zinterface_nm[i + 1]
		return paramdict

	def diff(self, other):
		"""For a pair of PhysParams instances, find their differences

		Arguments:
		other   PhysParams instance

		Returns:
		A dict instance. The keys are where the two parameter dicts (obtained by
		method to_dict()) differ. The values are 2-tuples of the values. If the
		key is missing in one of the PhysParams instances, then the
		corresponding member of the tuple is None.
		"""
		params_dict1 = self.to_dict()
		params_dict2 = other.to_dict()
		diff_dict = {}
		for p in params_dict1:
			if p not in params_dict2:
				diff_dict[p] = (params_dict1[p], None)
			elif params_dict1[p] != params_dict2[p]:
				diff_dict[p] = (params_dict1[p], params_dict2[p])
		for p in params_dict2:
			if p not in params_dict1:
				diff_dict[p] = (None, params_dict2[p])
		return diff_dict

	def print_diff(self, arg, style = None):
		"""Print differences between a pair of PhysParams instances.

		Arguments:
		arg     PhysParams or dict instance. If a PhysParams instance, find the
		        difference between the two by using self.diff(arg). If a dict
		        instance, it should be the result of a 'diff' between PhysParams
		        instances, i.e., the values should be 2-tuples.
		style   Determines the format. Possible values are None or 'full',
		        'table' or 'align', 'short' or 'summary'.

		No return value.
		"""
		if isinstance(arg, PhysParams):
			diff = self.diff(arg)
		elif isinstance(arg, dict):
			diff = arg
		else:
			raise TypeError("Argument must be another PhysParams instance or a dict instance [from diff()]")
		if style is None or style == "full":
			for p in sorted(diff):
				print("  %s: %s vs %s" % (p, diff[p][0], diff[p][1]))
			print()
		if style == "table" or style == "align":
			# First pass: determine the column widths for aligned output.
			l0, l1, l2 = 0, 0, 0
			for p in diff:
				l0 = max(l0, len(p))
				l1 = max(l1, len(str(diff[p][0])))
				l2 = max(l2, len(str(diff[p][1])))
			fmt = "  %%-%is: %%-%is vs %%-%is" % (l0, l1, l2)
			for p in sorted(diff):
				print(fmt % (p, diff[p][0], diff[p][1]))
			print()
		elif style == "short" or style == "summary":
			print(", ".join(sorted(diff.keys())))

	def check_equal(self, arg, ignore = None):
		"""Check whether two PhysParams instances are equal

		Arguments:
		arg     PhysParams or dict instance. If a PhysParams instance, find the
		        difference between the two by using self.diff(arg). If a dict
		        instance, it should be the result of a 'diff' between PhysParams
		        instances, i.e., the values should be 2-tuples.
		ignore  A list of keys whose values should not be compared.

		Returns:
		False if the 'param dict' of the PhysParams instances have differences,
		otherwise True.
		"""
		if isinstance(arg, PhysParams):
			diff = self.diff(arg)
		elif isinstance(arg, dict):
			diff = arg
		else:
			raise TypeError("Argument must be another PhysParams instance or a dict instance [from diff()]")
		if ignore is None:
			ignore = []  # default value
		for p in diff:
			if p not in ignore:
				return False
		return True

	def lattice_transformed(self):
		"""Check whether the lattice transformation is set"""
		return self.lattice_orientation is not None

	def lattice_transformed_by_matrix(self):
		"""Check whether the lattice transformation is set and is defined as a matrix"""
		return (self.lattice_orientation is not None) and isinstance(self.lattice_trans, np.ndarray)

	def lattice_transformed_by_angle(self):
		"""Check whether the lattice transformation is set and is defined as an angle"""
		return isinstance(self.lattice_orientation, list) and len(self.lattice_orientation) == 1 and isinstance(self.lattice_orientation[0], (float, np.floating, int, np.integer))

	def make_param_cache(self):
		"""Cache z dependence of parameters"""
		# Half-step grid -0.5, 0, 0.5, ..., nz - 0.5: covers the lattice points
		# as well as the midpoints between them (see method z() below).
		self.cache_z = -0.5 + 0.5 * np.arange(2 * self.nz + 1)
		self.cache_param = self.layerstack.make_param_cache(self.cache_z, dz = 1.0, delta_if = self.dzinterface, nm = False, extend = True)

	def clear_param_cache(self):
		"""Clear cached z dependence of parameters"""
		self.cache_z = None
		self.cache_param = None
		# print ("Cleared parameter cache")

	def z(self, z):
		"""Calculate and cache z dependence of parameters.

		Argument:
		z     None, integer, float, or array. If None, return value at centre of
		      range. If integer, return value at z'th position. If float, return
		      value at z'th position; this is especially useful for half-integer
		      values. If array (or list, etc.), return values at all positions
		      in array.

		Note:
		The lattice points are numbered 0, ..., nz-1. Note that the z dependence
		is also calculated at 0.5, 1.5, ...

		Performance warning:
		Calling this function for single numbers z is relatively slow. If one
		needs to iterate over many values, use an array input for z

		Returns:
		A dict instance. Its keys label the z-dependence parameters, its value
		is a float or an array with the parameter value(s) at z.
		"""
		if z is None:
			if self.cache_param is None:
				self.make_param_cache()
			z_idx = self.nz
			return {v: self.cache_param[v][z_idx] for v in self.cache_param}
		elif isinstance(z, (int, np.integer)):
			if self.cache_param is None:
				self.make_param_cache()
			# On the half-step cache grid, position z maps to index 2 z + 1.
			z_idx = 2 * z + 1
			return {v: self.cache_param[v][z_idx] for v in self.cache_param}
		elif isinstance(z, (float, np.floating)) and abs(z * 2 - round(z * 2)) < 1e-9:
			if self.cache_param is None:
				self.make_param_cache()
			z_idx = int(round(2 * z + 1))
			return {v: self.cache_param[v][z_idx] for v in self.cache_param}
		else:
			# Performance warning: Avoid using single numbers z in this case.
			# For z being an array, the warning does not apply.
			return self.layerstack.param_z(z, dz = 1.0, delta_if = self.dzinterface, nm = False, extend = True)

	def zvalues_nm(self, extend = 0):
		"""Return array of z coordinates in nm

		Argument:
		extend   Integer. Add this many values to the return array. Default: 0.

		Returns:
		Numpy array of float type, of dimension 1, and of length nz + extend.
		"""
		if not isinstance(extend, int):
			raise TypeError("Argument extend must be an int instance.")
		return ((np.arange(0, self.nz + extend, dtype = float) - 0.5 * extend) / (self.nz - 1) - 0.5) * self.lz_thick
		## For extend = 0: (np.arange(0, self.nz, dtype = float) / (self.nz - 1) - 0.5) * self.lz_thick

	def interface_z_nm(self):
		"""Return array of the z coordinates in nm of the interfaces"""
		return (np.array(self.zinterface, dtype = float) / (self.nz - 1) - 0.5) * self.lz_thick

	def yvalues_nm(self, extend = 0):
		"""Return array of y coordinates in nm
		Note the slight difference to z coordinates.

		Argument:
		extend   Integer. Add this many values to the return array. Default: 0.

		Returns:
		Numpy array of float type, of dimension 1, and of length ny + extend.
		"""
		if not isinstance(extend, int):
			raise TypeError("Argument extend must be an int instance.")
		return (np.arange(0, self.ny + extend, dtype = float) + 0.5 - 0.5 * extend) * self.yres - 0.5 * self.ly_width
		## For extend = 0: return (np.arange(0, self.ny, dtype = float) + 0.5) * self.yres - 0.5 * self.ly_width

	def well_z(self, extend_nm = 0.0, strict = False):
		"""Return bottom and top z indices of the well layer

		Arguments:
		extend_nm   Float. Subtract and add this length (in nm) to the lower and
		            upper z coordinate, respectively. The actual extension is an
		            integer number of lattice points. Downward rounding is used.
		strict      True or False. If True, raise an exception if the well layer
		            is undefined or ambiguous. If False, return (None, None) in
		            that case.

		Returns:
		i_bottom  Float or None.
		i_top     Float or None.
		"""
		jwell = self.layerstack.layer_index("well")
		if jwell is None:
			if strict:
				raise ValueError("The well layer is undefined or ambiguous")
			return None, None
		i_bottom, i_top = self.zinterface[jwell], self.zinterface[jwell + 1]
		extend = int(np.floor(extend_nm / self.zres + 1e-10))  # small offset to avoid rounding errors
		return i_bottom - extend, i_top + extend

	def well_z_nm(self, extend_nm = 0.0, strict = False):
		"""Return bottom and top z coordinates (in nm) of the well layer

		See well_z(). Note that rounding to an integer number of lattice points
		also applies here.
		"""
		jwell = self.layerstack.layer_index("well")
		if jwell is None:
			if strict:
				raise ValueError("The well layer is undefined or ambiguous")
			return None, None
		interface_nm = self.interface_z_nm()
		z_bottom, z_top = interface_nm[jwell], interface_nm[jwell + 1]
		extend = self.zres * np.floor(extend_nm / self.zres + 1e-10)
		return z_bottom - extend, z_top + extend

	def symmetric_z(self, strict = False):
		"""Return z coordinates of largest symmetric extension of the well layer

		Arguments:
		strict      True or False. If True, raise an exception if the well layer
		            is undefined or ambiguous. If False, return (None, None) in
		            that case.

		Returns:
		z_bottom  Float or None.
		z_top     Float or None.
		"""
		z_bottom, z_top = self.well_z(strict = strict)
		if z_bottom is None or z_top is None:
			return None, None
		# NOTE(review): well_z() returns lattice-point indices, so the values
		# returned here are indices as well, not coordinates in nm.
		max_extend = min(z_bottom, self.nz - 1 - z_top)
		return z_bottom - max_extend, z_top + max_extend
+
+### MISCELLANEOUS
+
def print_length_scales(params):
	"""Print length scales.

	Argument:
	params   PhysParams instance.
	"""
	bfield = params.magn if isinstance(params.magn, (float, np.floating)) else params.magn.z()
	in_field = not (bfield == 0.0)
	# Magnetic length l_B = 1 / sqrt(e B / hbar); infinite at zero field.
	lB = 1. / sqrt(eoverhbar * abs(bfield)) if in_field else float('inf')
	print()
	print("y resolution: %8.3f nm" % params.yres)
	print("l_B         :", "%8.3f nm" % lB if in_field else "   inf")
	print("2 pi l_B^2  :", "%8.3f nm^2" % (2. * pi / (eoverhbar * abs(bfield))) if in_field else "   inf")
	print("y width     : %8.3f nm" % params.ly_width)
	print("flux = B*b*c: %8.3f T nm^2" % (bfield * params.yres * params.a_lattice))
	print("flux / (h/e) = b * c / (2 pi lB^2)")
	flux = (eoverhbar / 2 / pi) * bfield * params.yres * params.a_lattice
	if flux > 1e-1:
		print("            : %8.3f" % flux)
	else:
		print("            : %8.3f * 10^-3" % (flux * 1000))

	if bfield > 0.0 and params.yres > lB / 4.:
		sys.stderr.write("Warning: y resolution is coarse compared to magnetic length\n")
	if params.ly_width < 4 * lB:
		sys.stderr.write("Warning: Width is small compared to magnetic length\n")
	print()
diff --git a/kdotpy-v1.0.0/src/kdotpy/phystext.py b/kdotpy-v1.0.0/src/kdotpy/phystext.py
new file mode 100644
index 0000000000000000000000000000000000000000..847b20e2dab9c1ef080cb90ef31cae9ee163c4a7
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/phystext.py
@@ -0,0 +1,319 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import re
+
+### HELPER FUNCTIONS ###
# Unicode superscript characters for the digits 0-9, indexed by digit value.
superscriptdigits = ['\u2070', '\xB9', '\xB2', '\xB3', '\u2074', '\u2075', '\u2076', '\u2077', '\u2078', '\u2079']
def unicode_power(exponent, quantity = "10"):
	"""Power-of-ten using Unicode superscripts.

	Arguments:
	exponent   Integer. The exponent n in 10^n. This number may be positive,
	           negative, or zero.
	quantity   String. How to express the base ('10').

	Returns:
	String. Empty string if exponent is zero.
	"""
	if exponent == 0:
		return ""
	digits = "".join(superscriptdigits[int(c)] for c in "%i" % abs(exponent))
	sign = "" if exponent > 0 else "\u207B"  # superscript minus for negative exponents
	return quantity + sign + digits
+
def format_value(x, style = None, fmt = None):
	"""Format a floating-point numerical value

	Arguments:
	x      Float. The numerical value.
	style  String. One of the formatting styles 'raw', 'plain', 'unicode',
	       'tex'. Any other style (including None) yields None.
	fmt    String. Formatting string such as '{:.3g}' for initial conversion of
	       units to string. Only e, f, g are permitted as formatting types.
	       Format modifiers (e.g., number of digits) are allowed.

	Returns:
	String, or None if the style is not recognized.

	Raises:
	TypeError   If fmt is not a string.
	ValueError  If fmt is not a valid e/f/g format string.
	"""
	fmt = '{:g}' if fmt is None else fmt
	if not isinstance(fmt, str):
		raise TypeError("Argument fmt must be a string.")
	if re.match("^{:[^{}]*[efg]}$", fmt) is None:
		raise ValueError("Argument fmt must be a format string of types e, f, or g, like '{:.3g}'.")
	s = fmt.format(x)
	if style in ('none', 'false'):
		return None
	if style == 'raw':
		return s
	if style == 'plain':
		if 'e' not in s:
			return s
		mantissa, exponent = s.split('e')
		return "{} x 10^{}".format(mantissa, int(exponent))
	if style == 'unicode':
		if 'e' not in s:
			return s.replace('inf', '\u221e')
		mantissa, exponent = s.split('e')
		return mantissa + " \u00d7 " + unicode_power(int(exponent))
	if style == 'tex':
		if 'e' not in s:
			return s.replace('inf', r'\infty')
		mantissa, exponent = s.split('e')
		return "${} \\times 10^{{{}}}$".format(mantissa, int(exponent))
	return None
+
+
+### UNIT FORMATTING ###
def format_unit(*args, style = None, negexp = True):
	"""Format a unit.

	Arguments:
	*args   One of the following: If None, Return empty string. If a single
	        string, parse it as a raw_unit_str to a sequence of units and powers
	        together with a power of ten. For more arguments, an int is treated
	        as a power of ten, a string as a simple unit (e.g., 'meV', 'nm'),
	        or a tuple (str, int) where the str is a simple unit and int its
	        power, so that e.g. ('nm', -2) means 'nm^-2'.
	style   String. One of the formatting styles 'raw', 'plain', 'unicode',
	        'tex'.
	negexp  True or False. If True, style quotients using negative exponents
	        (e.g., 'm s^-1'). If False, use a slash notation (e.g., 'm/s').

	Returns:
	String, or None if style is 'none', 'false', or unrecognized.

	Raises:
	ValueError  If the arguments are not of the forms described above.
	"""
	if len(args) == 1 and args[0] is None:
		return ""
	if style == 'none' or style == 'false':
		return None
	if len(args) == 1 and isinstance(args[0], str):
		raw_unit_str = args[0]
		if style is None or style == 'raw':
			return raw_unit_str
		# Parse unit string:
		# Leading power of ten, written as '1e±n', '1.0e±n', '10^±n' or '10**±n'.
		m = re.match(r"(1(\.0*)?[eE]([+-]?[0-9]+)|10(\^|\*\*)([+-]?[0-9]+))", raw_unit_str)
		if m is not None:
			# The exponent is group 3 for the '1e...' form and group 5 for the
			# '10^...' form; exactly one of the two is set. (Using group 2,
			# the optional '.0...' part, here was a bug.)
			unit10p = int(m.group(3)) if m.group(3) is not None else int(m.group(5))
			unit_str = raw_unit_str[m.end(0):]
		else:
			unit10p = 0
			unit_str = raw_unit_str
		# Unit labels with optional '^n' or '**n' powers; a '/' prefix inverts
		# the sign of the power. re.findall() yields '' for unmatched groups.
		matches = re.findall(r"\s*([/\*]?)\s*([a-zA-Z_µ]+)(\s*(\^|\*\*)\s*([+-]?[0-9]+))?", unit_str)
		unitdata = []
		for m in matches:
			unit = m[1]
			power = int(m[4]) if m[4] else 1
			if m[0] == '/':
				power *= -1
			unitdata.append((unit, power))
	elif len(args) >= 1:
		# Multi-argument form: ints are powers of ten, strings simple units,
		# tuples (unit, power) pairs.
		raw_unit_str = None
		unitdata = []
		unit10p = 0
		for arg in args:
			if isinstance(arg, int):
				unit10p += arg
			elif isinstance(arg, str):
				unitdata.append((arg, 1))
			elif isinstance(arg, tuple) and len(arg) == 2 and isinstance(arg[1], int):
				if arg[0] == 10 or arg[0] == '10':
					unit10p += arg[1]
				elif isinstance(arg[0], str):
					unitdata.append(arg)
				else:
					raise ValueError("Invalid tuple input")
			else:
				raise ValueError("Invalid tuple input")
	else:
		raise ValueError("Invalid input argument")

	# Build formatted unit string
	if style == 'tex':
		valstr = "10^{%i}" % unit10p if unit10p != 0 else ""
		ustr = ""
		for unit, power in unitdata:
			if power == 1:
				ustr += r"\,\mathrm{%s}" % unit
			elif power > 1 or (power < 0 and negexp):
				ustr += r"\,\mathrm{%s}^{%i}" % (unit, power)
			elif power == -1:
				ustr += r"/\mathrm{%s}" % unit
			elif power < -1:
				ustr += r"/\mathrm{%s}^{%i}" % (unit, -power)
		if valstr == "" and ustr.startswith("/"):
			valstr = "1"  # write '1/x', not bare '/x'
		elif valstr == "" and ustr.startswith(r'\,'):
			ustr = ustr[2:]  # strip leading thin space
		return '$' + valstr + ustr + '$'
	elif style == 'plain' or (style == 'raw' and raw_unit_str is None):
		valstr = "10^%i" % unit10p if unit10p != 0 else ""
		ustr = ""
		for unit, power in unitdata:
			if power == 1:
				ustr += " %s" % unit
			elif power > 1 or (power < 0 and negexp):
				ustr += " %s^%i" % (unit, power)
			elif power == -1:
				ustr += "/%s" % unit
			elif power < -1:
				ustr += "/%s^%i" % (unit, -power)
		if valstr == "" and ustr.startswith("/"):
			valstr = "1"
		return (valstr + ustr).lstrip(' ')
	elif style == "unicode":
		valstr = unicode_power(unit10p, "10")
		ustr = ""
		for unit, power in unitdata:
			if power == 1:
				ustr += " %s" % unit
			elif power > 1 or (power < 0 and negexp):
				ustr += " " + unicode_power(power, unit)
			elif power == -1:
				ustr += "/%s" % unit
			elif power < -1:
				ustr += "/" + unicode_power(-power, unit)
		if valstr == "" and ustr.startswith("/"):
			valstr = "1"
		return (valstr + ustr).lstrip(' ')
	elif style == 'raw':
		return raw_unit_str
	else:
		return None
+
def is_known_vector(q):
	"""Return True if q names a known vector quantity or one of its components.

	Known vector prefixes are 'k', 'b', and 'a'; valid component suffixes are
	'r', 'x', 'y', 'z', 'phi', and 'theta' (e.g., 'kx', 'bphi').
	"""
	prefix = next((x for x in ('k', 'b', 'a') if q.startswith(x)), None)
	if prefix is None:
		return False
	suffix = q[len(prefix):]
	return suffix == '' or suffix in ('r', 'x', 'y', 'z', 'phi', 'theta')
+
def format_vector_unit(q, style = None, negexp = True, degrees = True):
	"""Format unit for vector k, b, and their (cartesian/angular) components

	Arguments:
	q        String. Vector quantity, e.g. 'k', 'kx', 'bphi'.
	style    String. One of the formatting styles 'raw', 'plain', 'unicode',
	         'tex'. Unrecognized styles (including None) yield None for the
	         angular components.
	negexp   True or False. Passed to format_unit(); negative exponents versus
	         slash notation.
	degrees  True or False. Whether angles are expressed in degrees or radians.

	Returns:
	String or None.
	"""
	if not is_known_vector(q):
		sys.stderr.write("Warning (vector_unit): Unknown vector quantity '%s'\n" % q)
		return None
	if q.endswith('phi') or q.endswith('theta'):
		# Use dict.get() so that style 'none', 'false', None, or any
		# unrecognized style yields None instead of raising a KeyError.
		if degrees:
			return {'raw': 'deg', 'plain': 'deg', 'unicode': '\xb0', 'tex': '$^{\\circ}$'}.get(style)
		else:
			# 'plain' was erroneously mapped to 'deg' here; radians intended.
			return {'raw': 'rad', 'plain': 'rad', 'unicode': 'rad', 'tex': '$\\mathrm{rad}$'}.get(style)
	if q.startswith('k'):
		return format_unit(('nm', -1), style = style, negexp = negexp)
	if q.startswith('b'):
		return format_unit('T', style = style, negexp = negexp)
	if q.startswith('a'):
		return format_unit('1', style = style, negexp = negexp)
	sys.stderr.write("Warning (vector_unit): Unknown vector quantity '%s'\n" % q)
	return None
+
+### PHYSICAL QUANTITY FORMATTING ###
def format_vector_q(q, style = None):
	"""Format the symbol for vector quantity q (e.g. 'k', 'bx', 'ktheta').

	Arguments:
	q      String. One of the prefixes 'k', 'b', 'a', optionally followed by a
	       component suffix ('r', 'x', 'y', 'z', 'phi', 'theta').
	style  String. One of the formatting styles 'raw', 'plain', 'unicode',
	       'tex'. Unrecognized styles (including None) yield None.

	Returns:
	String or None.
	"""
	if not is_known_vector(q):
		sys.stderr.write("Warning (vector_q): Unknown vector quantity '%s'\n" % q)
		return None
	prefix = ''
	for x in ['k', 'b', 'a']:
		if q.startswith(x):
			prefix = x
			break
	if style == 'none' or style == 'false':
		return None
	elif style == 'raw':
		return q
	comp = '' if q == prefix else q[len(prefix):]
	if style == 'plain':
		# Magnetic field is conventionally written with a capital B.
		return 'B' + comp if prefix == 'b' else q
	elif style == 'unicode':
		if comp == 'theta':
			return '\u03b8' if prefix == 'k' else '\u03b8B' if prefix == 'b' else '\u03b8' + prefix
		elif comp == 'phi':
			# The final fallback previously returned theta (U+03B8) + prefix;
			# it must use phi (U+03D5), consistent with the other phi cases.
			return '\u03d5' if prefix == 'k' else '\u03d5B' if prefix == 'b' else '\u03d5' + prefix
		else:
			return q
	elif style == 'tex':
		prefix = 'B' if prefix == 'b' else prefix
		if comp == '':
			return '$%s$' % prefix
		elif comp == 'theta':
			return r'$\theta$' if prefix == 'k' else r'$\theta_B$' if prefix == 'b' else ('$\\theta_{%s}$' % prefix)
		elif comp == 'phi':
			return r'$\phi$' if prefix == 'k' else r'$\phi_B$' if prefix == 'b' else ('$\\phi_{%s}$' % prefix)
		else:
			return '$%s_{%s}$' % (prefix, comp)
	else:
		return None
+
+### MISCELLANEOUS ###
def orbital_labels(style = None, norb = 8):
	"""Return list of formatted orbital labels.

	Arguments:
	style  String. One of the formatting styles 'raw', 'plain', 'unicode',
	       'tex'. Unrecognized styles yield None.
	norb   Integer. Number of orbitals; must be 6 or 8.

	Returns:
	List of norb strings, or None.

	Raises:
	ValueError  If norb is neither 6 nor 8.
	"""
	if norb not in [6, 8]:
		raise ValueError("Argument norb must be 6 or 8")
	raw_labels = ['G6,+1/2', 'G6,-1/2', 'G8,+3/2', 'G8,+1/2', 'G8,-1/2', 'G8,-3/2', 'G7,+1/2', 'G7,-1/2']
	if style in ('none', 'false'):
		return None
	if style == 'raw':
		return raw_labels[:norb]
	if style == 'plain':
		return [lbl.replace('G', 'Gamma') for lbl in raw_labels[:norb]]
	if style == 'unicode':
		# \u0393 is the capital Greek letter Gamma.
		return [lbl.replace('G', '\u0393') for lbl in raw_labels[:norb]]
	if style == 'tex':
		tex_labels = [
			'$\\Gamma_6,+\\frac{1}{2}$', '$\\Gamma_6,-\\frac{1}{2}$',
			'$\\Gamma_8,+\\frac{3}{2}$', '$\\Gamma_8,+\\frac{1}{2}$',
			'$\\Gamma_8,-\\frac{1}{2}$', '$\\Gamma_8,-\\frac{3}{2}$',
			'$\\Gamma_7,+\\frac{1}{2}$', '$\\Gamma_7,-\\frac{1}{2}$'
		]
		return tex_labels[:norb]
	return None
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/__init__.py b/kdotpy-v1.0.0/src/kdotpy/ploto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5c3581a41f700a1e1a951133c0256946aa8dc5b
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/__init__.py
@@ -0,0 +1,63 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+
+# in tools.py:
+from .tools import initialize, get_transitions_deltaemax
+
+# in toolstext.py:
+from .toolstext import format_axis_unit, format_axis_label
+
+# in auxil.py:
+# NOTE: The former name aux.py leads to problems in Windows based file systems
+from .auxil import observable, integrated_observable, transitions, potential, q_z
+
+# in disp.py:
+from .disp import bands_1d, bands_2d, add_bhz, add_transitions
+
+# in dos.py:
+from .dos import dos_idos, dos_ll, local_density, density2d, at_constant_dens_ll, densityz, densityz_energy, add_curves
+from .dos import add_contours as add_dos_contours
+
+# in wf.py:
+from .wf import wavefunction_z, wavefunction_zy, abs_wavefunctions_z, abs_wavefunctions_y, wf_add_bandlabels
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/auxil.py b/kdotpy-v1.0.0/src/kdotpy/ploto/auxil.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a8de1b20edba42d22498461f33489fdd66f91f
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/auxil.py
@@ -0,0 +1,588 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from matplotlib.backends.backend_pdf import PdfPages
+
+from ..config import get_config_bool, get_config_num
+from .colortools import get_colormap
+from .tools import get_fignum, get_plot_size, plot_energies, log10_clip, log10_scale
+from .tools import get_transitions_deltaemax, get_transitions_log_limits, get_transitions_quantity
+from .toolslegend import add_colorbar, get_legend_file
+from .toolstext import format_axis_unit, obs_latex, set_xlabel, set_ylabel, set_disp_axis_label
+from .toolsticks import add_frequency_ticks, set_ticks
+
+from ..observables import regularize_observable
+from ..momentum import Vector, VectorGrid
+from ..materials import material_parameters_tex, material_parameters_units
+
+
def observable(eidata, params, obs, which = None, filename = "", regularize = True):
	"""Plot value of an observable as function of momentum or magnetic field.

	Arguments:
	eidata      DiagData instance
	params      PhysParams instance. NOTE(review): not referenced in this
	            function body; presumably kept for interface consistency with
	            the other plot functions — confirm before removing.
	obs         The observable id
	which       Which states to include in the plot. This can be either None
	            (plot all states) or a 2-tuple of integers or None, which
	            specifies the range of bindex values that should be shown.
	filename    Output filename
	regularize  Whether to connect states with apparently matching observable
	            curves, rather than following the bindex value.

	Returns:
	matplotlib Figure instance
	"""
	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)

	# Colour and line-style cycles; combined per band below so that
	# consecutive bands get distinguishable colour/style combinations.
	colors = ['r', 'c', 'b', 'g', 'm', 'y']
	styles = ['-', '--', ':', '-.']
	allplots = []
	legendlabels = []

	# Curves are only drawn if the base point provides band indices and band
	# characters, and carries no LL index (i.e., not Landau-level data).
	# Otherwise, an empty set of axes is produced.
	eidata0 = eidata.get_base_point()
	if eidata0.bindex is not None and eidata0.char is not None and eidata0.llindex is None:
		# Map band index -> band character at the base point
		idx_char_dict = {}
		for b, c in zip(eidata0.bindex, eidata0.char):
			idx_char_dict[b] = c

		eivals = eidata.get_eival_by_bindex()
		obsvals = eidata.get_observable_by_bindex(obs)

		if regularize:
			# For every pair of bands whose base-point energies lie closer
			# than 0.03 (meV, presumably), let regularize_observable()
			# reconnect the eigenvalue and observable curves of the pair.
			for e1 in eidata0.eival:
				other_eival = eidata0.eival[eidata0.eival > e1]
				if len(other_eival) > 0 and np.min(other_eival) - e1 < 0.03:
					i1 = eidata0.get_index(e1)
					i2 = eidata0.get_index(np.min(other_eival))
					b1 = eidata0.bindex[i1]
					b2 = eidata0.bindex[i2]
					eivals[b1], eivals[b2], obsvals[b1], obsvals[b2] = regularize_observable(eivals[b1], eivals[b2], obsvals[b1], obsvals[b2])

		# select bands and order them
		# A 2-tuple 'which' restricts to bindex values in [which[0], which[1]];
		# a None entry leaves that side of the range open.
		bands = []
		for b in idx_char_dict:
			if isinstance(which, tuple) and len(which) == 2:
				if isinstance(which[0], (int, np.integer)) and b < which[0]:
					continue
				elif isinstance(which[1], (int, np.integer)) and b > which[1]:
					continue
			bands.append(b)
		bands = sorted(bands)
		# Horizontal axis: momentum magnitude, or the parameter value
		# (vector length if the parameter values are Vector instances).
		if eidata.get_paramval() is None:
			kval = [k.len() for k in eidata.get_momenta()]
		else:
			kval = [k.len() if isinstance(k, Vector) else k for k in eidata.get_paramval()]
		for jj, b in enumerate(bands):
			p, = plt.plot(kval, np.real(obsvals[b]), colors[(jj // 2) % 6] + styles[2 * ((jj % 24) // 12) + (jj % 2)])
			allplots.append(p)

			i = eidata0.get_index((b,))
			# NOTE(review): '%i' truncates the (float) eigenvalue to an
			# integer in the legend — confirm this is intended.
			legendlabels.append("$%i$ %s" % (eidata0.eival[i], eidata0.char[i].replace('+', '$+$').replace('-', '$-$')))

	set_xlabel("$k$", r"$\mathrm{nm}^{-1}$")
	# Vertical axis label: fall back to the observable id if no TeX form exists
	obsstr = obs_latex(obs)
	ylabel = str(obs) if obsstr is None else " ".join(obsstr) if isinstance(obsstr, (tuple, list)) else obsstr
	plt.ylabel(ylabel.replace("\n", " "))
	ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol=2)
	set_ticks()

	if filename != "":
		plt.savefig(filename)
	plt.close()
	return fig
+
def integrated_observable(params, ee, int_obs, energies = None, filename = "", title = None, title_val = None, idos = None, orange = None, xlabel = None, ylabel = None):
	"""Plot sum of observables for all states below a certain energy or integrated DOS value.

	Arguments:
	params      PhysParams instance
	ee          numpy array with energy values (values for horizontal axis)
	int_obs     Data (integrated observable). Either a single array whose last
	            axis matches len(ee), or a 2-tuple/list of two such arrays
	            holding the + and - contributions separately; in the latter
	            case the total (sum) is plotted in green and the two
	            contributions in red and blue.
	energies    Special energies (Fermi energy, chemical potential) to be shown
	filename    Output filename; must not be empty
	title       Plot title
	title_val   None, number, tuple, list or array. If a number, print this
	            value in the plot title using % formatting. A tuple can be used
	            for multiple values. If a list or array, take the subsequent
	            values for the subsequent plots.
	idos        numpy array with integrated DOS values. If this is given, plot
	            as function of integrated DOS rather than energy.
	orange      Range of observable values
	xlabel      Label of the horizontal axis
	ylabel      Label of the vertical axis

	Returns:
	None
	"""
	if filename == "":
		raise ValueError("Argument filename may not be empty")
	## Extract +/- contributions if input is list/tuple of length 2
	int_obs_p, int_obs_m = None, None
	if isinstance(int_obs, (tuple, list)) and len(int_obs) == 2:
		int_obs_p = np.atleast_2d(int_obs[0])
		int_obs_m = np.atleast_2d(int_obs[1])
		omin, omax = np.amin([int_obs_p, int_obs_m]), np.amax([int_obs_p, int_obs_m])
		int_obs = int_obs_p + int_obs_m
	else:
		int_obs = np.atleast_2d(int_obs)
		omin, omax = np.amin(int_obs), np.amax(int_obs)
	if int_obs.ndim != 2:
		raise ValueError("Argument int_obs has invalid number of dimensions")
	if int_obs.shape[-1] != len(ee):
		raise ValueError("Invalid shape for argument int_obs")
	if idos is not None and not (isinstance(idos, np.ndarray) and idos.ndim == 1):
		raise TypeError("Argument idos must be a 1-dim numpy array or None")
	if idos is not None and len(idos) != len(ee):
		# Try to interpolate, assume energy range is correct
		sys.stderr.write("Warning (ploto.integrated_observable): Size for argument idos is (%i,) while (%i,) is expected. We interpolate, assuming the energy range is correct.\n" % (len(idos), len(ee)))
		idos_ee = np.linspace(ee.min(), ee.max(), len(idos))
		idos = np.interp(ee, idos_ee, idos)

	# One figure per row of int_obs; multiple figures go into a multi-page PDF.
	nfigs = int_obs.shape[0]
	if orange is None:
		# Default observable range: data range extended by 10% on both sides
		orange = [1.1 * omin - 0.1 * omax, 1.1 * omax - 0.1 * omin]

	pdfpages = PdfPages(filename) if nfigs > 1 else None
	for i in range(0, nfigs):
		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)

		if idos is None:
			# Vertical axis is energy; dotted line marks zero observable value
			plt.plot([0.0, 0.0], [min(ee), max(ee)], 'k:')
			plt.plot(int_obs[i], ee, 'g-')
			if int_obs_p is not None and int_obs_m is not None:
				plt.plot(int_obs_p[i], ee, 'r-')
				plt.plot(int_obs_m[i], ee, 'b-')
			plt.axis([orange[0], orange[1], min(ee), max(ee)])
			if ylabel is not None:
				plt.ylabel(ylabel)
			else:
				set_ylabel('$E$', '$\\mathrm{meV}$')
		else:
			# idos(ee) vs int_obs(ee)
			plt.plot([0.0, 0.0], [idos[0], idos[-1]], 'k:')
			plt.plot(int_obs[i], idos, 'g-')
			if int_obs_p is not None and int_obs_m is not None:
				plt.plot(int_obs_p[i], idos, 'r-')
				plt.plot(int_obs_m[i], idos, 'b-')
			# search and plot gaps
			# A gap shows up as a plateau in the IDOS; mark the first point of
			# each plateau with a circle marker.
			for j in range(0, len(idos) - 1):
				if idos[j] == idos[j+1] and (j == 0 or idos[j-1] != idos[j]):
					if int_obs_p is not None and int_obs_m is not None:
						plt.plot(int_obs_p[i, j], idos[j], 'ro')
						plt.plot(int_obs_m[i, j], idos[j], 'bo')
					else:
						plt.plot(int_obs[i, j], idos[j], 'go')
			plt.axis([orange[0], orange[1], idos[0], idos[-1]])
			if ylabel is not None:
				plt.ylabel(ylabel)
			else:
				set_ylabel('IDOS $n$', '$e / \\mathrm{nm}^2$')
		if xlabel is not None:
			plt.xlabel(xlabel)
		set_ticks()

		plot_energies(energies, xval = orange)

		if (title is not None) and (title != ""):
			# A list/array title_val indexes per figure; a scalar or tuple is
			# applied to all figures; otherwise use the title verbatim.
			if isinstance(title_val, (list, np.ndarray)):
				title_str = title % title_val[i]
			elif isinstance(title_val, (tuple, int, float, np.integer, np.floating)):
				title_str = title % title_val
			else:
				title_str = title
			ax.text(0.5, 0.98, title_str, ha='center', va='top', transform=ax.transAxes)

		if pdfpages is None:
			plt.savefig(filename)
		else:
			pdfpages.savefig(fig)
		plt.close()

	if pdfpages is not None:
		pdfpages.close()
+
def transitions(data, filename = "", xrange = None, legend = False, title = None,
                showplot = False, paramstr = "", plotvar = None, colormap = 'hot_r', deltaemax = None, **plotopts):
	"""Plot optical transitions.
	The output is a plot with magnetic field B (or momentum k) on the horizontal
	axis, and the energy difference Delta E of the transitions on the vertical
	axis. The colour encodes the amplitude of the transitions.

	Arguments:
	data        DiagData instance for which the DiagDataPoint elements have a
	            valid TransitionsData element (ddp.transitions is not None).
	filename    Output file name
	xrange      2-tuple; range of the horizontal axis
	legend      Whether to show a legend (colour bar)
	title       Plot title
	showplot    If True, show plot on screen. (Debug option)
	paramstr    String that determines horizontal axis label
	plotvar     If set, plot against these values rather than the 'natural'
	            component in the VectorGrid (data.grid). For example, if the
	            data is as function of a magnetic field in some direction in
	            spherical coordinates, one can use 'bx' to plot against the Bx
	            component.
	colormap    matplotlib colormap for colouring the data points
	deltaemax   Maximum value on the vertical axis
	plotopts    Keyword list that catches further unused plot options (e.g.
	            erange, which is not used by this function)

	Returns:
	matplotlib Figure instance, or None if the data grid is invalid.
	"""
	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)
	markersize = rcParams['lines.markersize']
	qty = get_transitions_quantity()

	# Return the empty figure if there is nothing that can be plotted
	if len(data) == 0:
		sys.stderr.write("Warning (ploto.transitions): No data.\n")
		return fig
	if any(d.transitions is None for d in data):
		sys.stderr.write("Warning (ploto.transitions): No transitions data.\n")
		return fig
	if sum(d.transitions.n for d in data) == 0:
		sys.stderr.write("Warning (ploto.transitions): Empty transitions data. Nothing to be plotted.\n")
		return fig

	# Determine ranges
	# Horizontal axis values come either from the momentum grid or from the
	# parameter (e.g. magnetic field) grid; both must be VectorGrid instances.
	if data[0].paramval is None:
		vgrid = data.get_momentum_grid()
		if isinstance(vgrid, VectorGrid):
			if len(vgrid.var) == 1:
				kval, kname, pval, pname = vgrid.get_var_const()
			else:
				sys.stderr.write("ERROR (ploto.transitions): Invalid dimension for VectorGrid\n")
				return
		else:
			sys.stderr.write("ERROR (ploto.transitions): Data must include a VectorGrid instance\n")
			return
	else:
		vgrid = data.get_paramval()
		if isinstance(vgrid, VectorGrid):
			kval, kname, pval, pname = vgrid.get_var_const()
		else:
			sys.stderr.write("ERROR (ploto.transitions): Data must include a VectorGrid instance\n")
			return
		# Special case: (btheta, bphi) -> btheta
		if pname == ('btheta', 'bphi') and abs(pval[1]) < 1e-6:
			pname = 'btheta'
			pval = pval[0]

	if plotvar is not None:
		if not isinstance(vgrid, VectorGrid):
			sys.stderr.write("Warning (ploto.transitions): Option 'plotvar' not supported if input variables are not in VectorGrid format.\n")
		else:
			try:
				kval = vgrid.get_values(plotvar)
				kname = plotvar
			except Exception:
				# Bare 'except' would also swallow KeyboardInterrupt/SystemExit
				sys.stderr.write("Warning (ploto.transitions): Invalid 'plotvar'. The plot will use the default variable instead.\n")

	if isinstance(xrange, (list, tuple)) and len(xrange) == 2:
		kmin = min(xrange)
		kmax = max(xrange)
	elif xrange is None:
		# Extend the data range on both sides by the configured fraction
		kmin0 = min(kval)
		kmax0 = max(kval)
		extend_xaxis = get_config_num('fig_extend_xaxis', minval = 0)
		kmin = kmin0 - extend_xaxis * (kmax0 - kmin0) if extend_xaxis > 0 else kmin0
		kmax = kmax0 + extend_xaxis * (kmax0 - kmin0) if extend_xaxis > 0 else kmax0
	else:
		raise TypeError("Argument xrange must be a list of length 2 or None")

	cmap = get_colormap(colormap)

	# Determine color scale (maximum value)
	qmin, qmax = get_transitions_log_limits(data, qty = qty)

	# Automatic determination of vertical limit
	if deltaemax is not None:
		emax = deltaemax
	else:
		emax, _ = get_transitions_deltaemax(data, qty = qty)

	# Plots
	for k, d in zip(kval, data):
		if d is None or d.transitions is None or d.transitions.n == 0:
			continue
		td = d.transitions  # shortcut
		amp = td.get_values(qty)
		q = log10_scale(amp, qmin, qmax)  # quantity that determines colouring and marker size
		# Nonlinear mappings emphasize the strongest transitions
		colorval = cmap(0.1 + 0.9 * q**3)
		sizes = (0.02 + 0.98 * q**5) * markersize**2
		sel = (td.delta_e() <= 1.2 * emax)  # do not plot points outside the plot range
		nsel = np.count_nonzero(sel)
		ax.scatter([k] * nsel, td.delta_e()[sel], c = colorval[sel], s = sizes[sel])

	plt.axis([kmin, kmax, 0, emax])
	set_disp_axis_label(kname, set_x = True)
	set_ylabel("$\\Delta E$", "$\\mathrm{meV}$")
	set_ticks()
	add_frequency_ticks()

	if legend:
		# Colour-bar label depends on the configured transition quantity
		if qty in ['deltae', 'delta_e']:
			legtext = r"$\Delta E$ " + format_axis_unit("$\\mathrm{meV}$")
		elif qty in ['freq', 'freqthz', 'freq_thz']:
			legtext = "Frequency\n$\\nu$ " + format_axis_unit("$\\mathrm{THz}$")
		elif qty in ['lambda', 'wavelength', 'lambdaum', 'lambda_um']:
			legtext = "Wave length\n$\\lambda$ " + format_axis_unit("$\\mathrm{\\mu m}$")
		elif qty == 'occupancy':
			legtext = "occupancy\n$f_2-f_1$ " + format_axis_unit("1")
		elif qty == 'amplitude':
			legtext = "amplitude\n" + format_axis_unit("$\\mathrm{nm}^2\\,\\mathrm{ns}^{-2}\\,\\mathrm{meV}^{-1}$")
		elif qty in ['rate', 'ratedensity', 'rate_density']:
			legtext = "rate density\n" + format_axis_unit("$\\mathrm{mV}^{-2}\\,\\mathrm{ns}^{-1}$")
		elif qty == 'absorption':
			legtext = "absorption\n$A$ " + format_axis_unit("1")
		else:
			legtext = "??"
		filename_leg = get_legend_file(filename)
		add_colorbar(qmin, qmax, cmap = colormap, transitions = True, markersize = markersize, label = legtext, filename = filename_leg)

	# Add labels (energy, LL index) at the right-hand edge
	labels = get_config_bool('plot_transitions_labels')  # ignore function argument 'labels'
	if labels:
		# Label the strongest transitions of the last data point; stop after
		# six labels or once the amplitude drops below 40% of the maximum.
		td = data[-1].transitions
		amp = td.get_values(qty)
		q = log10_clip(amp, 0, qmax)
		order = np.argsort(-amp)  # sort in descending order
		p = 0  # counter of printed values
		for o in order:
			delta_e = np.abs(td.energies[o, 1] - td.energies[o, 0])
			yval = delta_e / emax
			if 0.02 <= yval <= 0.98:
				# Label with the energy and LL index of the lower LL member
				min_member = 0 if td.llindex[o, 0] < td.llindex[o, 1] else 1
				labeltxt = "$%.1f$ ($%i$)" % (td.energies[o, min_member], td.llindex[o, min_member])
				ax.text(0.98, delta_e / emax, labeltxt, fontsize=6, ha='right', va='center', transform=ax.transAxes)
				p += 1
			if p >= 6:
				break
			if q[o] / qmax < 0.4:
				break

	if (title is not None) and (title != ""):
		ax.text(0.5, 0.98, title, ha='center', va='top', transform=ax.transAxes)
	if filename != "" and filename is not None:
		plt.savefig(filename)
	if showplot:
		plt.show()
	return fig
+
def potential(params, pot, filename = "", **kwds):
	"""Plot potential as function of z.
	Thin wrapper for ploto.q_z(); see there for further keyword arguments.
	"""
	arr = np.asarray(pot)
	if arr.ndim == 1:
		# Single potential curve
		q_z(params, arr, filename = filename, **kwds)
		return
	if arr.ndim == 2 and arr.shape[1] == params.norbitals:
		# One curve per orbital; q_z() expects quantities along the first axis
		# TODO: Plot legends
		q_z(params, arr.transpose(), filename = filename, **kwds)
		return
	sys.stderr.write("ERROR (ploto.potential): Input array (argument pot) has invalid shape.\n")
	return
+
def q_z(params, qty, filename = "", title = None, ylabel = None, yunit = None, legend = False, text = None):
	"""Plot a quantity as function of z.

	Arguments:
	params     PhysParams instance. The z values are extracted from this.
	qty        If a list or numpy array of numerical values, this is interpreted
	           as the values to be plotted. It may be 1- or 2-dimensional and
	           one axis should be of the same length as the number of z values
	           extracted from argument params.
	           If a string or list of strings, extract this/these variable(s)
	           from the PhysParams instance (argument params).
	filename   Output filename. If empty or None, the figure is not saved.
	title      Plot title.
	ylabel     Label of the vertical axis
	yunit      Unit to be shown on the vertical axis
	legend     If False, do not show a legend. A list of strings may be given
	           corresponding to the quantities being plotted. If qty is a string
	           or list of strings, setting legend = True will show the string(s)
	           of qty in the legend.
	text       If a nonempty string, extra text placed in the upper-left corner
	           of the plot.

	Returns:
	None
	"""
	# TODO: return figure
	nz = params.nz
	z = params.zvalues_nm()
	# Interface positions in nm, on the same (centred) axis as z
	zint = [(zi / (nz - 1) - 0.5) * params.lz_thick for zi in params.zinterface]

	## Normalize input to a 2-dim array qz with shape (ncurves, nz)
	if isinstance(qty, list):
		if len(qty) == 0:
			return
		elif len(qty) == nz and isinstance(qty[0], (float, np.floating, int, np.integer, complex, np.complexfloating)):
			# np.complexfloating replaces np.complex_, which was removed in
			# NumPy 2.0; it also covers lower-precision complex scalar types.
			qz = np.array([qty])
		elif isinstance(qty[0], (list, np.ndarray)) and len(qty[0]) == nz:
			qz = np.array(qty)
		elif isinstance(qty[0], str):
			# Extract named material parameters from params, silently skipping
			# quantities that are not available.
			qz = []
			for q in qty:
				try:
					qz.append([params.z(z1)[q] for z1 in range(0, nz)])  # not very efficient, but it will work
				except Exception:
					# Do not use a bare 'except': that would also swallow
					# KeyboardInterrupt and SystemExit.
					pass
			qz = np.array(qz)
		else:
			sys.stderr.write("ERROR (ploto.q_z): Input list has invalid shape.\n")
			return
	elif isinstance(qty, np.ndarray):
		qsh = qty.shape
		if len(qsh) == 1 and qsh[0] == nz:
			qz = np.array([qty])
		elif len(qsh) == 2 and qsh[1] == nz:
			qz = np.array(qty)
		else:
			sys.stderr.write("ERROR (ploto.q_z): Input array has invalid shape.\n")
			return
	else:
		sys.stderr.write("ERROR (ploto.q_z): Input must be array or list.\n")
		return

	if len(qz) == 0:
		sys.stderr.write("Warning (ploto.q_z): Nothing to be plotted.\n")
		return

	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)
	# Dashed line at zero
	plt.plot([z.min(), z.max()], [0, 0], 'k--')

	## Plot
	allplots = []
	for j, q in enumerate(qz):
		c = ['b', 'g', 'r', 'y', 'm', 'c'][j % 6]
		thisplot, = plt.plot(z, q, c + '-')
		allplots.append(thisplot)

	## Determine min and max
	ymin = qz.min()
	ymax = qz.max()
	if ymax - ymin < 1e-6:
		# Degenerate (constant, near-zero) data: fall back to a fixed range
		if abs(ymax) < 5e-7:
			ymin, ymax = -1e-3, 1e-3

	# Dotted vertical lines at the internal interfaces (outer edges excluded)
	for zi in zint[1:-1]:
		plt.plot([zi, zi], [ymin, ymax], 'k:')
	plt.axis([z.min()*1.05, z.max()*1.05, ymin - 0.05 * (ymax - ymin), ymax + 0.05 * (ymax - ymin)])

	## Determine label of y axis
	ylabel1 = ""
	if ylabel is None:
		if isinstance(qty, list) and isinstance(qty[0], str):
			ylabel1 = ", ".join(["$%s$" % material_parameters_tex[q] for q in qty])
	elif isinstance(ylabel, str):
		ylabel1 = ylabel if "$" in ylabel else "$%s$" % ylabel
	# elif isinstance(ylabel, list):  # TODO

	## Determine unit (also part of y axis label)
	## If not specified, try to do it automatically
	if yunit is not None and yunit != "":
		if ylabel1 != "":
			ylabel1 += " "
		ylabel1 += format_axis_unit("$\\mathrm{%s}$" % yunit)
	elif isinstance(qty, list) and isinstance(qty[0], str):
		# Use the common unit of the requested quantities, if there is one
		yunit = None
		for q in qty:
			try:
				u = material_parameters_units[q]
			except Exception:
				u = "1"
			if yunit is None:
				yunit = u
			elif yunit != u:
				yunit = None
				sys.stderr.write("Warning (ploto.q_z): The requested quantities %s do not have the same unit, and should therefore not be plotted together.\n" % ", ".join(qty))
				break
		if yunit is None:
			if ylabel1 != "":
				ylabel1 += format_axis_unit("respective units")
		elif yunit != "1":
			if ylabel1 != "":
				ylabel1 += " "
			ylabel1 += format_axis_unit("$\\mathrm{%s}$" % yunit)
	plt.ylabel(ylabel1)
	set_xlabel('$z$', '$\\mathrm{nm}$')
	set_ticks()

	if text is not None and text != "":
		ax.text(0.03, 0.98, text, ha='left', va='top', transform=ax.transAxes)

	## Plot legend; determine labels automatically if they are not specified
	if legend:
		if isinstance(legend, list) and len(legend) == len(qz) and isinstance(legend[0], str):
			legendlabels = legend
			ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol=2)
		elif isinstance(qty, list) and isinstance(qty[0], str):
			legendlabels = ["$%s$" % material_parameters_tex[q] for q in qty]
			ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol=2)
		else:
			sys.stderr.write("Warning (ploto.q_z): A legend has been requested, but cannot be shown because the labels are not given.\n")
	if filename != "" and filename is not None:
		plt.savefig(filename)
	plt.close()

	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/colormaps.py b/kdotpy-v1.0.0/src/kdotpy/ploto/colormaps.py
new file mode 100644
index 0000000000000000000000000000000000000000..9efe954de7c01da6b6d8611b3602318addb48536
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/colormaps.py
@@ -0,0 +1,105 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import matplotlib.colors as mplcolors
+from matplotlib import colormaps as mplcolormaps
+
### Custom colormaps ###
# Registers custom colormaps with matplotlib at import time. Each cmapdata
# dict maps channel name -> list of (position, value_below, value_above)
# tuples, the segment-data format of LinearSegmentedColormap.

# Color map bluered (blue-gray-red)
cmapdata_bluered = {
	'red':   [(0.000, 0.0, 0.0), (0.500, 0.5, 0.5), (1.000, 1.0, 1.0)],
	'green': [(0.000, 0.0, 0.0), (0.500, 0.5, 0.5), (1.000, 0.0, 0.0)],
	'blue':  [(0.000, 1.0, 1.0), (0.500, 0.5, 0.5), (1.000, 0.0, 0.0)]}
cmapdata_grayred = {
	'red':   [(0.000, 0.5, 0.5), (1.000, 1.0, 1.0)],
	'green': [(0.000, 0.5, 0.5), (1.000, 0.0, 0.0)],
	'blue':  [(0.000, 0.5, 0.5), (1.000, 0.0, 0.0)]}
# 'Dual' variant: the two channel values at position 0.5 differ, producing a
# discontinuity at the midpoint.
cmapdata_bluered_dual = {
	'red':   [(0.000, 0.0, 0.0), (0.500, 0.4, 0.6), (1.000, 1.0, 1.0)],
	'green': [(0.000, 0.0, 0.0), (0.500, 0.4, 0.4), (1.000, 0.0, 0.0)],
	'blue':  [(0.000, 1.0, 1.0), (0.500, 0.6, 0.4), (1.000, 0.0, 0.0)]}
cm_bluered = mplcolors.LinearSegmentedColormap('bluered', cmapdata_bluered)
cm_grayred = mplcolors.LinearSegmentedColormap('grayred', cmapdata_grayred)
cm_bluered_dual = mplcolors.LinearSegmentedColormap('bluereddual', cmapdata_bluered_dual)
mplcolormaps.register(cmap=cm_bluered)
mplcolormaps.register(cmap=cm_grayred)
mplcolormaps.register(cmap=cm_bluered_dual)

# Color map yrbc (yellow-red-blue-cyan)
cmapdata_yrbc = {
	'red':   [(0.000, 0.0, 0.0), (0.333, 0.0, 0.0), (0.500, 0.5, 0.5), (0.667, 1.0, 1.0), (1.000, 1.0, 1.0)],
	'green': [(0.000, 1.0, 1.0), (0.333, 0.0, 0.0), (0.500, 0.5, 0.5), (0.667, 0.0, 0.0), (1.000, 1.0, 1.0)],
	'blue':  [(0.000, 1.0, 1.0), (0.333, 1.0, 1.0), (0.500, 0.5, 0.5), (0.667, 0.0, 0.0), (1.000, 0.0, 0.0)]}
# yrbc2: as yrbc, with two extra anchor points (0.2 and 0.8) per channel
cmapdata_yrbc2 = {
	'red':   [(0.000, 0.0, 0.0), (0.200, 0.3, 0.3), (0.333, 0.0, 0.0), (0.500, 0.5, 0.5), (0.667, 1.0, 1.0), (0.800, 0.9, 0.9), (1.000, 1.0, 1.0)],
	'green': [(0.000, 1.0, 1.0), (0.200, 0.5, 0.5), (0.333, 0.0, 0.0), (0.500, 0.5, 0.5), (0.667, 0.0, 0.0), (0.800, 0.5, 0.5), (1.000, 1.0, 1.0)],
	'blue':  [(0.000, 1.0, 1.0), (0.200, 0.9, 0.9), (0.333, 1.0, 1.0), (0.500, 0.5, 0.5), (0.667, 0.0, 0.0), (0.800, 0.3, 0.3), (1.000, 0.0, 0.0)]}
cm_yrbc = mplcolors.LinearSegmentedColormap('yrbc', cmapdata_yrbc)
cm_yrbc2 = mplcolors.LinearSegmentedColormap('yrbc2', cmapdata_yrbc2)
mplcolormaps.register(cmap=cm_yrbc)
mplcolormaps.register(cmap=cm_yrbc2)

# Color map allwhite and allgray
# Constant-colour maps (white and 87.5% gray)
cmapdata_allwhite = {
	'red':   [(0.000, 1.0, 1.0), (1.000, 1.0, 1.0)],
	'green': [(0.000, 1.0, 1.0), (1.000, 1.0, 1.0)],
	'blue':  [(0.000, 1.0, 1.0), (1.000, 1.0, 1.0)]}
cmapdata_allgray = {
	'red':   [(0.000, 0.875, 0.875), (1.000, 0.875, 0.875)],
	'green': [(0.000, 0.875, 0.875), (1.000, 0.875, 0.875)],
	'blue':  [(0.000, 0.875, 0.875), (1.000, 0.875, 0.875)]}
cm_allwhite = mplcolors.LinearSegmentedColormap('allwhite', cmapdata_allwhite)
cm_allgray = mplcolors.LinearSegmentedColormap('allgray', cmapdata_allgray)
mplcolormaps.register(cmap=cm_allwhite)
mplcolormaps.register(cmap=cm_allgray)

# Derived discrete maps based on matplotlib's 'tab20', if it is available
try:
	t20map = mplcolormaps['tab20']
except KeyError:
	t20map = None
if t20map is not None:
	# tab20alt: even-indexed tab20 colours first, then the odd-indexed ones
	t20altcolors = t20map.colors[0::2] + t20map.colors[1::2]
	t20altmap = mplcolors.ListedColormap(t20altcolors, name='tab20alt')
	mplcolormaps.register(cmap=t20altmap)
	# tab21posneg: even-indexed colours in reverse order, black in the centre,
	# then the odd-indexed colours (21 entries in total)
	t21posnegcolors = tuple(list(t20map.colors)[-2::-2]) + ((0.0, 0.0, 0.0),) + t20map.colors[1::2]
	t21posnegmap = mplcolors.ListedColormap(t21posnegcolors, name='tab21posneg')
	mplcolormaps.register(cmap=t21posnegmap)
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/colortools.py b/kdotpy-v1.0.0/src/kdotpy/ploto/colortools.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c0b90e337e00acd911c97f640d1be93e101c612
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/colortools.py
@@ -0,0 +1,886 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import re
+import sys
+import os
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+import matplotlib.cm as mplcm
+import matplotlib.colors as mplcolors
+from matplotlib import colormaps as mplcolormaps
+from matplotlib.patches import Rectangle
+from ..observables import all_observables
+from . import colormaps  # noqa: F401  # Import registers colormaps with matplotlib.pyplot
+from ..config import get_config, configpaths
+
def color_auto_range(obsid):
	"""Determine the appropriate range of the color function for the given observable.

	Arguments:
	obsid   String. Observable id, looked up in all_observables.

	Returns:
	List of length 3, [omin, omax, type] where:
	omin is the observable value that corresponds to the minimum color value
	omax is the observable value that corresponds to the maximum color value
	type is the 'color type' that determines which colormap value is used from
	the configuration file

	NOTE(review): The fallback for unknown observables returns a list of
	length 4, which does not match the length-3 contract documented above; the
	original TODO is kept. Confirm what callers expect in that case.
	"""
	if obsid in all_observables:
		dimful = all_observables.dimful is True
		obs = all_observables[obsid]
		minmax = obs.get_range(dimful = dimful)
		return [minmax[0], minmax[1], obs.colordata]
	return [-1.0, 1.0, -0.5, 0.5]  # TODO: Check whether correct
+
def colorbar_addlabel(axis, cb, label, label_x = 0.0, label_y = -0.05, fontsize = 8):
	"""Add a text label to a colorbar.

	The placement method is read from the configuration value
	'fig_colorbar_labelpos'.

	Arguments:
	axis       Parent axis
	cb         Colorbar object
	label      String. Label text
	label_x    Float. Offset for x coordinate.
	label_y    Float. Offset for y coordinate.
	fontsize   Float. Font size.
	"""
	labelpos = get_config('fig_colorbar_labelpos', choices = ['xaxis', 'yaxis', 'center', 'left', 'legacy'])
	if labelpos == 'xaxis':
		# Label below the colorbar, as x-axis label
		cb.ax.set_xlabel(label, fontsize = fontsize)
		return
	if labelpos == 'yaxis':
		# Label along the colorbar; line breaks are flattened to spaces
		cb.ax.set_ylabel(label.replace('\n', '  '), fontsize = fontsize)
		return
	if labelpos == 'center':
		# Centered text positioned in figure coordinates
		fig_tf = cb.ax.figure.transFigure
		x1, y1 = fig_tf.inverted().transform(axis.transAxes.transform([1.0, label_y]))
		cb.ax.text(0.5 + 0.5 * x1, y1, label, ha = 'center', va = 'top', fontsize = fontsize, transform = fig_tf)
		return
	if labelpos == 'left':
		# Left-aligned text in colorbar axis coordinates
		cb.ax.text(0.0, label_y, label, ha = 'left', va = 'top', fontsize = fontsize, transform = cb.ax.transAxes)
		return
	if labelpos == 'legacy':
		# Use matplotlib's own colorbar label mechanism
		cb.set_label(label, labelpad=label_x, y=label_y, rotation=0, fontsize = fontsize)
		return
	raise ValueError("Invalid value for labelpos")
+
def make_colorbar(vmin, vmax, cmap = None, axis = None, fraction = 0.15, pad = 0.05, aspect = 20, filename = None, label = None, label_x = -20, label_y1 = -0.05, label_y2 = -0.04, ticks = None, fontsize = None):
	"""Make colour bar (legend).

	Arguments:
	vmin      Value corresponding to the start of the colormap
	vmax      Value corresponding to the end of the colormap
	cmap      Colormap (required); the label of one of the colormaps defined by
	          matplotlib or kdotpy.
	axis      matplotlib axis instance in which to draw the colorbar; if None,
	          use the current axis.
	fraction, pad, aspect   Colorbar size; see matplotlib documentation [1].
	filename  NOT USED
	label     Label below the colorbar
	label_x, label_y1, label_y2   Numerical values that determine the location
	                              of the label
	ticks     Set the ticks of the colorbar. See matplotlib documentation [1].
	fontsize  Font size of the label. See matplotlib documentation [1].

	[1] Documentation for matplotlib.pyplot.colorbar at matplotlib.org

	Returns:
	A matplotlib Colorbar instance.
	"""
	if cmap is None:
		raise ValueError("The argument 'cmap' is required")
	if axis is None:
		axis = plt.gca()
	# A ScalarMappable that carries only the value range serves as the source
	# for the colorbar.
	mappable = mplcm.ScalarMappable(cmap=cmap)
	mappable.set_array(np.array([vmin, vmax]))
	cb = plt.colorbar(mappable, ax=axis, fraction = fraction, pad = pad, aspect = aspect, ticks = ticks)
	cb.ax.tick_params(labelsize=fontsize)
	if label is not None:
		# Multi-line labels use a smaller font and a slightly different offset
		multiline = '\n' in label
		fontsize1 = (8 if fontsize is None else fontsize * 0.8) if multiline else fontsize
		colorbar_addlabel(axis, cb, label, label_x = label_x, label_y = label_y2 if multiline else label_y1, fontsize = fontsize1)
	return cb
+
def make_dual_indexed_colorbar(vmin, vmax, cmap = None, axis = None, **kwds):
	"""Make a dual colorbar with two columns of indexed colours.
	See make_colorbar for arguments.

	The colorbar is drawn as a white background with one coloured patch per
	integer value and per column; the left column is marked with 'up'
	triangles, the right column with 'down' triangles.
	"""
	if isinstance(cmap, str):
		cmap = get_colormap(cmap)
	if cmap is None:
		raise ValueError("The argument 'cmap' is required")
	# Make a 'dummy' colorbar with white background, ...
	cb = make_colorbar(vmin, vmax, cmap = 'allwhite', axis = axis, **kwds)

	# ... on top of which coloured patches are drawn.
	# NOTE(review): assumes vmax - vmin >= 1; vrange == 0 would divide by zero
	# below — confirm callers guarantee an integer range of at least 1.
	vrange = int(vmax - vmin)
	for x in range(0, vrange):
		# Colours for left/right column: sample in the lower/upper half of
		# each of the vrange colormap cells
		col1 = cmap((x + 0.25) / vrange)
		col2 = cmap((x + 0.75) / vrange)
		cb.ax.add_patch(Rectangle((0.0, x / vrange), 0.5, 1 / vrange, facecolor = col1, zorder=1, transform = cb.ax.transAxes))
		cb.ax.add_patch(Rectangle((0.5, x / vrange), 0.5, 1 / vrange, facecolor = col2, zorder=1, transform = cb.ax.transAxes))
	# Triangle markers at the vertical centre of each patch
	cb.ax.plot([0.25] * vrange, [(x + 0.5) / vrange for x in range(0, vrange)], 'k^', markersize = 3.0, mew = 0.25, transform = cb.ax.transAxes)
	cb.ax.plot([0.75] * vrange, [(x + 0.5) / vrange for x in range(0, vrange)], 'kv', markersize = 3.0, mew = 0.25, transform = cb.ax.transAxes)
	cb.ax.tick_params(length = 0.0)
	return cb
+
def make_dual_shaded_colorbar(vmin, vmax, cmap = None, axis = None, label = None, label_x = -20, label_y1 = -0.05, label_y2 = -0.04, fontsize = None, twosided = False, **kwds):
	"""Make a dual colorbar with two columns of shaded colours.
	See make_colorbar for arguments.

	Additional argument:
	twosided    True or False. If the colorbar represents the absolute value of
	            an observable, show positive and negative values if True, only
	            positive values if False.
	"""
	if isinstance(cmap, str):
		cmap = get_colormap(cmap)
	if cmap is None:
		raise ValueError("The argument 'cmap' is required")
	fontsize_s = 8 if fontsize is None else fontsize * 0.8  # smaller font for multi-line labels

	# Symmetrize the value range if twosided is requested
	vmin1 = -max(abs(vmin), abs(vmax)) if twosided else vmin
	vmax1 = max(abs(vmin), abs(vmax)) if twosided else vmax

	# Make a 'dummy' colorbar with white background, ...
	cb = make_colorbar(vmin1, vmax1, cmap = 'allwhite', axis = axis, fontsize = fontsize, **kwds)

	# ... on top of which coloured patches are drawn.
	# The left column samples the colormap downwards from its centre (0.5),
	# the right column upwards from the centre.
	n = 64  # number of shades per column
	for x in range(0, n):
		xc = abs(x - 0.5 * (n - 1)) / 0.5 / (n - 1) if twosided else (x + 0.5) / n
		col1 = cmap(0.5 - 0.5 * xc)
		col2 = cmap(0.5 + 0.5 * xc)
		cb.ax.add_patch(Rectangle((0.0, x / n), 0.5, 1.0 / n, facecolor = col1, zorder=1, transform = cb.ax.transAxes))
		cb.ax.add_patch(Rectangle((0.5, x / n), 0.5, 1.0 / n, facecolor = col2, zorder=1, transform = cb.ax.transAxes))

	if label is not None:
		# Multi-line labels use the smaller font and a slightly different offset
		fontsize1 = fontsize_s if '\n' in label else fontsize
		label_y = label_y2 if '\n' in label else label_y1
		colorbar_addlabel(axis, cb, label, label_x = label_x, label_y = label_y, fontsize = fontsize1)
	return cb
+
def make_transitions_colorbar(vmin, vmax, cmap = None, axis = None, markersize = 1.0, **kwds):
	"""Make colorbar for the transitions plot with variable-size markers.

	Arguments:
	vmin, vmax    Value range; the tick labels are powers of ten, i.e., the
	              values are interpreted on a log10 scale.
	cmap          Colormap instance or label (required).
	axis          Parent axis; passed to make_colorbar.
	markersize    Sets the size of the markers.
	For other arguments, see make_colorbar.

	Returns:
	A matplotlib Colorbar instance.
	"""
	if isinstance(cmap, str):
		cmap = get_colormap(cmap)
	if cmap is None:
		raise ValueError("The argument 'cmap' is required")
	# Make a 'dummy' colorbar with a uniform gray background, ...
	margin = 0.025
	cb = make_colorbar(-margin, 1.0 + margin, cmap = 'allgray', axis = axis, **kwds)

	# ... and add 'plot points'
	# Integer bounds of the value range; computed once and reused for the
	# ticks below. (The original code recomputed the identical values twice.)
	vmin1, vmax1 = int(np.floor(vmin)), int(np.ceil(vmax))
	nval = int((vmax1 - vmin1) * np.floor(20 / (vmax1 - vmin1)))
	legend_values = np.linspace(0.0, 1.0, nval + 1)
	colorval = [cmap(0.1 + 0.9 * val**3) for val in legend_values]
	sizes = [(0.02 + 0.98 * val**5) * markersize**2 for val in legend_values]
	cb.ax.scatter([0.5] * len(legend_values), (legend_values + margin) / (1.0 + 2.0 * margin), c = colorval, s = sizes, zorder=2, transform = cb.ax.transAxes)

	# Ticks at integer powers of ten within the value range
	cb.set_ticks([(val - vmin) / (vmax - vmin) for val in range(vmin1, vmax1 + 1)])
	cb.set_ticklabels(["$10^{%i}$" % val for val in range(vmin1, vmax1 + 1)])
	cb.ax.tick_params(length = 0.0)
	return cb
+
+
# Module-level bookkeeping so that repeated failures are only reported once
_failed_colormap_tries = []  # lists of candidates for which no match was found
_failed_colormap_imports = []  # filenames for which import_colormap failed
_available_cmaps_shown = False  # whether the available colormaps were listed
def try_colormap(cmap_or_list):
	"""Try all colormap labels from a list and take the first valid label.

	Arguments:
	cmap_or_list   A Colormap instance, a comma separated string of colormap
	               labels, or a list of labels and/or Colormap instances.
	               Labels of the form 'file:NAME' are imported from file with
	               import_colormap().

	Returns:
	String. The first label that refers to a valid colormap; 'Blues' as
	fallback if none of the candidates is valid.
	"""
	global _failed_colormap_tries
	global _failed_colormap_imports
	global _available_cmaps_shown
	if isinstance(cmap_or_list, mplcolors.Colormap):
		return cmap_or_list.name
	if isinstance(cmap_or_list, str):
		cmap_or_list = [cm.strip() for cm in cmap_or_list.split(',')]
	if not isinstance(cmap_or_list, list):
		raise TypeError("Argument must be a list of colormap identifiers or a comma separated string")
	if not all(isinstance(cmap, (str, mplcolors.Colormap)) for cmap in cmap_or_list):
		raise TypeError("Elements of argument cmap_or_list must be str or Colormap instances")
	for cmap in cmap_or_list:
		if isinstance(cmap, mplcolors.Colormap):
			return cmap.name
		elif cmap in mplcolormaps:
			return cmap
		elif cmap.startswith('file:'):
			filename = cmap[5:]
			if filename in _failed_colormap_imports:
				continue  # do not retry imports that have failed before
			try:
				cmapf = import_colormap(filename)
			except FileNotFoundError:
				_failed_colormap_imports.append(filename)
				sys.stderr.write("ERROR (try_colormap): Failed to import colormap from '%s'. File not found.\n" % filename)
			except Exception as e:  # NOTE(review): 'e' is unused
				_failed_colormap_imports.append(filename)
				sys.stderr.write("ERROR (try_colormap): Failed to import colormap from '%s'. Invalid colour data.\n" % filename)
				# NOTE(review): this re-raise aborts the whole lookup instead
				# of trying the next candidate — confirm whether intended.
				raise
			else:
				if cmapf is not None:
					return cmap  # import succeeded; label is now registered
				else:
					sys.stderr.write("ERROR (try_colormap): Failed to import colormap from '%s'. Invalid colour data.\n" % filename)
					_failed_colormap_imports.append(filename)
	if cmap_or_list not in _failed_colormap_tries:
		_failed_colormap_tries.append(cmap_or_list)
		sys.stderr.write("Warning (try_colormap): No suitable colormap found. Fall back to default 'Blues'.\n")
		if not _available_cmaps_shown:
			_available_cmaps_shown = True
			sys.stderr.write("Warning (try_colormap): Available colormaps are: %s\n" % ", ".join(list(mplcolormaps)))
	return "Blues"
+
def get_colormap(cmap_or_list):
	"""Try all colormap labels from a list, and return the first valid colormap instance."""
	if isinstance(cmap_or_list, mplcolors.Colormap):
		return cmap_or_list
	# try_colormap returns a label that is guaranteed to be registered
	return mplcolormaps[try_colormap(cmap_or_list)]
+
def get_colormap_from_config(config_key, **kwds):
	"""Return the first valid colormap instance from a config value."""
	candidates = get_config(config_key, **kwds)
	# try_colormap returns a label that is guaranteed to be registered
	return mplcolormaps[try_colormap(candidates)]
+
## Applying the colormap via a masked array avoids warnings for NaN entries
def apply_colormap(cmap, data, nanvalue = 1.0):
	"""Apply colormap to data that contains NaN values.

	Arguments:
	cmap      Colormap (a callable mapping values to RGBA)
	data      Array with data values
	nanvalue  Apply this colour value to NaN values. This can be a single number
	          between 0.0 and 1.0 (position on the colour scale) or an RGB
	          triplet.

	Returns:
	Array with RGB triplets for each entry in the input array data.
	"""
	nan_mask = np.isnan(data)
	masked_data = np.ma.masked_where(nan_mask, data)
	rgb = cmap(masked_data)[..., 0:3]
	if isinstance(nanvalue, float) and 0.0 <= nanvalue <= 1.0:
		# Single grey value: replicate it into an RGB triplet
		rgb[nan_mask] = np.array([nanvalue, nanvalue, nanvalue])
	elif isinstance(nanvalue, (list, tuple, np.ndarray)) and len(nanvalue) == 3:
		rgb[nan_mask] = np.array(nanvalue)
	return rgb
+
def rgb_to_hsl(rgb):
	"""Conversion from rgb triplet(s) to hsl triplet(s).

	Arguments:
	rgb   Array whose last axis holds the (r, g, b) components in [0, 1].

	Returns:
	Array of the same shape with (h, s, l) components, all in [0, 1].
	"""
	rgb = np.asarray(rgb)
	r = rgb[..., 0]
	g = rgb[..., 1]
	b = rgb[..., 2]
	# Min/max over the last axis, so that input of any rank works.
	# (The original hard-coded axis = 2, which required a 3-dim array.)
	mx = np.amax(rgb, axis = -1)
	mn = np.amin(rgb, axis = -1)
	c = mx - mn
	z = np.zeros_like(r)
	# Safe denominator; entries with c == 0 are masked out by the conditions
	cs = np.where(c > 0, c, 1.0)
	h0 = np.where( (c > 0) & (mx == r), (g - b) / cs, np.where( (c > 0) & (mx == g), 2 + (b - r) / cs,   np.where( (c > 0) & (mx == b), 4 + (r - g) / cs, z)))
	h = np.mod(h0, 6.0) / 6.0
	l = (mx + mn) / 2
	# Saturation s = c / (1 - |2l - 1|) where defined, 0 for achromatic
	# colours. (Bug fix: the original 'np.where(l < 1, z, ...)' had the
	# condition inverted, yielding s = 0 for every colour with l < 1 and a
	# division by zero at l = 1.)
	denom = 1.0 - np.abs(2.0 * l - 1.0)
	s = np.divide(c, denom, out = np.zeros_like(c), where = denom > 0)
	return np.stack((h, s, l), axis = -1)
+
def hsl_to_rgb(hsl):
	"""Conversion from hsl triplet to rgb triplet"""
	hsl = np.asarray(hsl)
	hue6 = 6.0 * hsl[..., 0]  # hue scaled to the six colour-wheel sectors
	sat = hsl[..., 1]
	lum = hsl[..., 2]
	chroma = sat * (1.0 - np.abs(2.0 * lum - 1.0))
	second = chroma * (1.0 - np.abs(np.mod(hue6, 2) - 1.0))
	zero = np.zeros_like(hue6)
	# Per-channel contribution depending on the hue sector
	red = np.where( (hue6 <= 1) | (hue6 >= 5), chroma, np.where( (hue6 <= 2) | (hue6 >= 4), second, zero))
	green = np.where( (hue6 >= 1) & (hue6 <= 3), chroma, np.where( (hue6 >= 4), zero, second))
	blue = np.where( (hue6 >= 3) & (hue6 <= 5), chroma, np.where( (hue6 <= 2), zero, second))
	# Shift all channels so that the lightness comes out right
	offset = lum - 0.5 * chroma
	return np.stack((red + offset, green + offset, blue + offset), axis = -1)
+
def hsl_mix(ratios, hval = None, normalize = False):
	"""Mix colors in hsl color space"""
	ratios = np.asarray(ratios)
	ncomp = ratios.shape[-1]
	total = np.sum(ratios, axis = -1)
	weights = ratios / total[..., np.newaxis]  # normalize
	# Hue angles of the components: evenly spaced on the colour wheel by
	# default, or taken from hval
	if hval is None:
		angles = 2 * np.pi * np.arange(0, ncomp) / ncomp
	else:
		angles = 2 * np.pi * np.asarray(hval)
	# Weighted sum of unit vectors on the colour wheel
	x = np.sum(weights * np.cos(angles), axis = -1)
	y = np.sum(weights * np.sin(angles), axis = -1)
	h = np.remainder(np.arctan2(y, x) / 2. / np.pi, 1.0)
	s = np.hypot(x, y)
	# Lightness: fixed 0.5 if normalize is set, else half the total weight
	l = 0.5 * np.ones_like(h) if normalize else 0.5 * total
	return np.stack((h, s, l), axis = -1)
+
def hsl_mix_to_rgb(ratios, hval = None, normalize = False):
	"""Mix colors in hsl color space and convert to rgb triplet"""
	return hsl_to_rgb(hsl_mix(ratios, hval, normalize))
+
def rgb_mix(ratios, colors = None, normalize = False, neutral = None):
	"""Mix colors in rgb space"""
	ratios = np.asarray(ratios)
	ncol = ratios.shape[-1]

	if colors is None:
		# Default palette: ncol fully saturated hues, evenly spaced on the
		# colour wheel
		hues = np.arange(0, ncol) * 1. / ncol
		base_hsl = np.stack((hues, np.ones((ncol,), dtype = float), 0.5 * np.ones((ncol,), dtype = float)), axis = -1)
		colors = hsl_to_rgb(base_hsl)
	else:
		colors = np.asarray(colors)

	if normalize:
		# Weighted average of the component colours
		total = np.sum(ratios, axis = -1)
		weights = ratios / total[..., np.newaxis]  # normalize
		rgb = np.sum(weights[..., np.newaxis] * colors, axis = -2)
	else:
		# Plain weighted sum; optionally fill the missing weight with the
		# neutral colour
		rgb = np.sum(ratios[..., np.newaxis] * colors, axis = -2)
		if neutral is not None and neutral is not False:
			col_n = np.array(mplcolors.to_rgba(neutral)[0:3])
			rem = np.clip(1.0 - np.sum(ratios, axis = -1), 0.0, 1.0)
			rgb += np.multiply.outer(rem, col_n)

	return np.clip(rgb, 0.0, 1.0)
+
def do_rgb_mix(rgb, coltype, **kwds):
	"""A simple wrapper that applies the appropriate colour mixing function from the 'color' type"""
	if (not coltype.startswith("mix")) or len(coltype) <= 3:
		# Plain 'mix' (or anything else): no neutral colour
		return rgb_mix(rgb, **kwds)
	if coltype[3] == ';':
		# 'mix;COLOR': neutral colour given as matplotlib colour string
		return rgb_mix(rgb, neutral = coltype.split(';')[1], **kwds)
	if len(coltype) == 4:
		# 'mixC': neutral colour given as single letter
		return rgb_mix(rgb, neutral = coltype[3].lower(), **kwds)
	raise ValueError("Invalid mix colour type. 'mix' must be followed by a single-letter colour or ';' followed by a valid matplotlib colour string.")
+
def mix_neutral(*arg):
	"""Mix in a neutral colour.

	Allowed argument patterns are (rgb, n) or (r, g, b, n), where:
	rgb      is a single rgb color triplet or an array of triplets.
	r, g, b  are either floats or arrays of identical size
	n        is the 'neutral' color to be mixed in, which can be a triplet or
	         any valid matplotlib color (triplet or string); None or False
	         means black.

	Note: For an ndarray argument rgb, the mixing is done in place, i.e., the
	input array is modified.

	Returns:
	For pattern (rgb, n): array of RGB values. For pattern (r, g, b, n): a
	3-tuple for scalar input, otherwise the three separate channel arrays.
	"""
	if len(arg) == 2:  # rgb, neutral_color
		if isinstance(arg[0], np.ndarray) and arg[0].shape[-1] in [3, 4]:
			rgb = arg[0][..., 0:3]
		elif isinstance(arg[0], (tuple, list, np.ndarray)) and len(arg[0]) in [3, 4]:
			rgb = np.array(arg[0][0:3])
		else:
			raise ValueError("Invalid RGB array")

	elif len(arg) == 4:  # r, g, b, neutral_color
		# Bug fix: the r, g, b values are arg[0], arg[1], arg[2]. The original
		# code read arg[1:4], i.e., it dropped the red channel and included
		# the neutral colour argument.
		if isinstance(arg[0], float) and isinstance(arg[1], float) and isinstance(arg[2], float):
			rgb = np.array(arg[0:3])
		elif isinstance(arg[0], np.ndarray) and isinstance(arg[1], np.ndarray) and isinstance(arg[2], np.ndarray):
			rgb = np.dstack(arg[0:3])
		else:
			raise ValueError("Invalid R, G, B arrays")
	else:
		raise ValueError("Input must be of the form RGB, N or R, G, B, N.")

	if arg[-1] is False or arg[-1] is None:
		col_n = np.array([0.0, 0.0, 0.0], dtype = float)
	else:
		# Bug fix: the neutral colour is the last argument (arg[-1]); the
		# original code used arg[1], which is the green channel in the
		# four-argument pattern.
		col_n = np.array(mplcolors.to_rgba(arg[-1])[0:3])

	# 'Missing' colour weight, filled up with the neutral colour
	rem = np.clip(1.0 - np.sum(rgb, axis = -1), 0.0, 1.0)
	rgb += np.multiply.outer(rem, col_n)
	if len(arg) == 2:
		return rgb
	elif len(arg) == 4 and rgb.ndim == 1:
		return tuple(rgb)
	else:
		return rgb[..., 0], rgb[..., 1], rgb[..., 2]
+
def do_mix_neutral(rgb, coltype):
	"""A simple wrapper for the neutral mixer, that handles the 'color' type"""
	if (not coltype.startswith("RGB")) or len(coltype) <= 3:
		# Plain 'RGB' (or anything else): nothing to mix in
		return rgb
	if coltype[3] == ';':
		# 'RGB;COLOR': neutral colour given as matplotlib colour string
		return mix_neutral(rgb, coltype.split(';')[1])
	if len(coltype) == 4:
		# 'RGBC': neutral colour given as single letter
		return mix_neutral(rgb, coltype[3].lower())
	raise ValueError("Invalid RGB colour type. 'RGB' must be followed by a single-letter colour or ';' followed by a valid matplotlib colour string.")
+
def intermediate_colors(rgb_arr):
	"""Calculate intermediate colours
	For an array (list) of rgb/rgba colors, calculate the intermediate of each
	two consecutive colors. The result is shorter by one element than the
	input array.
	"""
	if isinstance(rgb_arr, list):
		arr = np.array(rgb_arr)
		midpoints = 0.5 * (arr[1:] + arr[:-1])
		return [tuple(c) for c in midpoints]
	if isinstance(rgb_arr, np.ndarray):
		return 0.5 * (rgb_arr[1:] + rgb_arr[:-1])
	raise TypeError("Input should be list or array")
+
def indexed_color_auto_range(cmap, default=None):
	"""Return minimum and maximum index for ListedColormap

	For a ListedColormap with N colors, return (-N / 2, N / 2) if N is odd and
	(-N / 2 + 1 / 2, N / 2 + 1 / 2) if N is even. If the argument cmap is not
	a ListedColormap, take N from the argument default.
	"""
	if isinstance(cmap, str):
		cmap = get_colormap(cmap)
	if isinstance(cmap, mplcolors.ListedColormap):
		ncolors = cmap.N
	elif isinstance(default, int):
		ncolors = default
	elif default is None:
		return (None, None)
	else:
		raise TypeError("Argument default must be int or None")
	half = ncolors / 2
	return (-half, half) if ncolors % 2 == 1 else (-half + 0.5, half + 0.5)
+
def indexed_colors(data, cmap, lower, upper):
	"""Apply indexed colormap to data.

	Arguments:
	data    Array of data values, or a single value
	cmap    Colormap (callable)
	lower   Value corresponding to the first colour
	upper   Value corresponding to the last colour

	Returns:
	A list of colours for array-like input, a single colour otherwise.
	"""
	crange = upper - lower
	if isinstance(data, (list, np.ndarray)):
		# Clamp each shifted value to [0, crange] before sampling the colormap
		clipped = [max(0, min(crange, value - lower)) for value in data]
		return [cmap(value / crange) for value in clipped]
	return cmap(max(0, min(crange, data - lower)) / crange)
+
def dual_indexed_colors(data1, data2, cmap, lower1, upper1):
	"""Apply indexed colormap to data.

	Arguments:
	data1   Array of data values (vertical value)
	data2   Array of data values (horizontal value)
	cmap    Colormap (callable)
	lower1  Value (vertical) corresponding to the first (= lower) colour
	upper1  Value (vertical) corresponding to the last (= upper) colour
	"""
	crange = upper1 - lower1
	both_arrays = isinstance(data1, (list, np.ndarray)) and isinstance(data2, (list, np.ndarray))
	both_scalars = isinstance(data1, (float, int, np.floating, np.integer)) and isinstance(data2, (float, int, np.floating, np.integer))
	if both_arrays:
		# 'vertical' colour index and 'horizontal' (left/right) selector
		vert = [max(0, min(crange - 0.5, v - lower1)) - 0.5 for v in data1]
		horz = [1 if v < 0.0 else 0 for v in data2]
		return [cmap((0.5 + 2 * round(c1) + c2) / crange / 2) for c1, c2 in zip(vert, horz)]
	if both_scalars:
		c1 = round(max(0, min(crange - 0.5, data1 - lower1)) - 0.5)  # 'vertical' colour
		c2 = 1 if data2 < 0 else 0  # 'horizontal' colour (left/right)
		return cmap((0.5 + 2 * c1 + c2) / crange / 2)
	raise TypeError("Arguments data1 and data2 must be of the same type: either both lists/arrays or both single numbers")
+
def dual_shaded_colors(data1, data2, cmap, lower1, upper1):
	"""Apply dual shading colormap to data.

	Arguments:
	data1   Array of data values (vertical value)
	data2   Array of data values (horizontal value)
	cmap    Colormap (callable)
	lower1  Value (vertical) corresponding to the first (= lower) colour
	upper1  Value (vertical) corresponding to the last (= upper) colour
	"""
	crange = upper1 - lower1
	both_arrays = isinstance(data1, (list, np.ndarray)) and isinstance(data2, (list, np.ndarray))
	both_scalars = isinstance(data1, (float, int, np.floating, np.integer)) and isinstance(data2, (float, int, np.floating, np.integer))
	if both_arrays:
		# Fractional 'vertical' value and sign-based 'horizontal' direction
		vert = [(v - lower1) / crange for v in data1]
		horz = [np.sign(v) for v in data2]
		return [cmap(0.5 + 0.5 * c2 * c1) for c1, c2 in zip(vert, horz)]
	if both_scalars:
		c1 = (data1 - lower1) / crange  # 'vertical' colour
		c2 = np.sign(data2)  # 'horizontal' colour (left/right)
		return cmap(0.5 + 0.5 * c2 * c1)
	raise TypeError("Arguments data1 and data2 must be of the same type: either both lists/arrays or both single numbers")
+
def color_interpolation(x_in, y_in, z_in, x_new, y_new):
	"""Helper function for imshow_polar. Interpolates an array of colours.

	Arguments:
	x_in   Array of values on horizontal axis corresponding to z_in (source)
	y_in   Array of values on vertical axis corresponding to z_in (source)
	z_in   2-dim array of data values (that should be plotted); a 3-dim array
	       is handled channel by channel
	x_new  Array of x values at which z_in should be interpolated (target)
	y_new  Array of y values at which z_in should be interpolated (target)

	Returns:
	Array of interpolated values; out-of-range points get the value 1.0.
	"""
	if z_in.ndim == 2:
		if len(x_in) != z_in.shape[0]:
			raise ValueError("x_in and z_in have non-matching shapes")
		if len(y_in) != z_in.shape[1]:
			raise ValueError("y_in and z_in have non-matching shapes")
		# First pass: interpolate along the x dimension, column by column
		step1 = np.zeros((len(x_new), len(y_in)), dtype = z_in.dtype)
		for col in range(len(y_in)):
			step1[:, col] = np.interp(x_new, x_in, z_in[:, col], left = 1.0, right = 1.0)
		# Second pass: interpolate along the y dimension, row by row
		step2 = np.zeros((len(x_new), len(y_new)), dtype = z_in.dtype)
		for row in range(len(x_new)):
			step2[row, :] = np.interp(y_new, y_in, step1[row, :], left = 1.0, right = 1.0)
		return step2
	if z_in.ndim == 3:
		# Recurse over the colour channels in the last axis
		result = np.zeros((len(x_new), len(y_new), z_in.shape[2]), dtype = z_in.dtype)
		for channel in range(z_in.shape[2]):
			result[:, :, channel] = color_interpolation(x_in, y_in, z_in[:, :, channel], x_new, y_new)
		return result
	raise ValueError("Incorrect number of dimensions")
+
def parse_color_str(c):
	"""Parse colour string; convert integers and floats to tuples

	Recognized forms, tried in this order: four or three comma-separated
	8-bit integers (divided by 255), four or three comma-separated floats,
	a single float. Any other string is returned unchanged.
	"""
	int_part = r'([012]?[0-9]?[0-9])'
	float_part = r'([01]\.[0-9]+)'
	sep = r'\s*,\s*'
	patterns = [
		(sep.join([int_part] * 4), lambda groups: tuple(int(x) / 255 for x in groups)),
		(sep.join([int_part] * 3), lambda groups: tuple(int(x) / 255 for x in groups)),
		(sep.join([float_part] * 4), lambda groups: tuple(float(x) for x in groups)),
		(sep.join([float_part] * 3), lambda groups: tuple(float(x) for x in groups)),
		(float_part, lambda groups: float(groups[0])),
	]
	for pattern, convert in patterns:
		m = re.match(pattern, c)
		if m is not None:
			return convert(m.groups())
	return c
+
def import_colormap(filename):
	"""Import colormap from a file.

	The file must contain either: A list of single colours. Then a
	ListedColormap is returned (without interpolation). Or: A list of
	'value:color' pairs, then a LinearSegmentedColormap is returned. This is
	an interpolated colormap. Discontinuities may be achieved by including two
	entries with the same value.

	Arguments:
	filename   String. File to read; looked up locally first, then in the
	           configuration paths.

	Returns:
	A Colormap instance registered under the name 'file:FILENAME', or None if
	the file contains no valid colour data.
	"""
	# First, look for file locally, then in all configuration paths (last-in-first-out)
	filename_full = filename
	if not os.path.isfile(filename):
		for path in reversed(configpaths):
			if os.path.isfile(os.path.join(path, filename)):
				filename_full = os.path.join(path, filename)
				break

	# Read data: each non-empty, non-comment line holds an optional anchor
	# value followed by a colour specification
	values = []
	colors = []
	with open(filename_full, 'r') as f:
		for ln in f:
			l = ln.strip().lstrip()  # NOTE(review): lstrip() is redundant after strip()
			if len(l) == 0:
				continue
			# Skip comment lines starting with '# ' or '##'
			if l.startswith('# ') or l.startswith('##'):
				continue
			# Optional anchor value in [0, 1] terminated by ':', ';' or '=',
			# then the colour string
			m = re.match(r'(([01]?\.[0-9]*|0|1)[:;=])?\s*(.+)', l)
			if m is None:
				raise ValueError("Invalid color data")
			value = None if m.group(2) is None or len(m.group(2)) == 0 else float(m.group(2))
			values.append(value)
			colors.append(parse_color_str(m.group(3)))

	if len(values) == 0:
		sys.stderr.write("ERROR (import_colormap): No valid color data.\n")
		return None
	# No anchor values at all: build a discrete (non-interpolated) colormap
	if all([value is None for value in values]):
		cmap = mplcolors.ListedColormap(colors, name = 'file:%s' % filename)
		mplcolormaps.register(cmap=cmap)
		return cmap
	if any([value is None for value in values]):
		sys.stderr.write("ERROR (import_colormap): Anchor values must be given either for all entries, or not at all.\n")
		return None
	values = np.array(values, dtype = float)

	rgba = mplcolors.to_rgba_array(colors)
	r, g, b, a = rgba.transpose()

	# Construct segment data; a repeated anchor value turns the previous
	# segment entry into a discontinuity (different colour left and right)
	rdata, gdata, bdata, adata = [], [], [], []
	for j, v in enumerate(values):
		if j > 0 and v == values[j-1]:
			rdata[-1] = (v, rdata[-1][1], r[j])
			gdata[-1] = (v, gdata[-1][1], g[j])
			bdata[-1] = (v, bdata[-1][1], b[j])
			adata[-1] = (v, adata[-1][1], a[j])
		else:
			rdata.append((v, r[j], r[j]))
			gdata.append((v, g[j], g[j]))
			bdata.append((v, b[j], b[j]))
			adata.append((v, a[j], a[j]))

	cdict = {'red': rdata, 'green': gdata, 'blue': bdata}
	# Include an alpha channel only if any entry is not fully opaque
	has_alpha = np.max(np.abs(a - 1.0)) >= 1e-3
	if has_alpha:
		cdict['alpha'] = adata
	cmap = mplcolors.LinearSegmentedColormap('file:%s' % filename, cdict)
	mplcolormaps.register(cmap=cmap)
	return cmap
+
+
+### DATA COLORS ###
def parse_obsrange(obsrange_auto, obsrange_man):
	"""Parse observable range. This selects the right range between automatic and manual setting.

	Arguments:
	obsrange_auto   List/tuple of length 2. Automatically determined range.
	obsrange_man    None or a list/tuple of length 2. Manual override; entries
	                may be None.

	Returns:
	2-tuple (omin, omax).
	"""
	if not (isinstance(obsrange_auto, (list, tuple)) and len(obsrange_auto) == 2):
		raise TypeError("Argument 'obsrange_auto' must be a list/tuple of length 2")
	if obsrange_man is None:
		return tuple(obsrange_auto)
	if not (isinstance(obsrange_man, (list, tuple)) and len(obsrange_man) == 2):
		raise TypeError("Argument 'obsrange_man' must be either None or a list/tuple of length 2")

	lo, hi = obsrange_man
	# Put the manual bounds in ascending order; (x, None) becomes (None, x)
	if lo is not None and (hi is None or lo > hi):
		lo, hi = hi, lo
	if lo is None and hi is not None:
		# (None, x): automatic lower limit — symmetric if the automatic range
		# is symmetric, otherwise 0
		lo = -hi if obsrange_auto[0] == -obsrange_auto[1] else 0.0
		return lo, hi
	return (lo, hi)
+
+
+index_warning_raised = False
+def data_colors(data, color, lb, plot_mode, obsrange = None):
+	"""Assign colours to data set.
+
+	Arguments:
+	data      DiagData instance
+	color     Colour type data; this may be None, a string, or a list of the
+	          form [colortype (string), obsid, ..., obsid, param, ..., param]
+	lb        State label, either an integer (bindex) or a 2-tuple
+	          (llindex, bindex). This is used if the 'indexed' type requires
+	          these values.
+	plot_mode Plot mode that is passed on to data.get_observable()
+	obsrange  If set, override the automatic observable range.
+
+	Returns:
+	colorval    Array of RGB triplets
+	normalized  Whether colour values are normalized (only meaningful for RGB
+	            and 'mix' colour types)."""
+	global index_warning_raised
+
+	# No color:
+	if color is None:
+		return None, False
+
+	# Single color
+	elif isinstance(color, str):
+		return color, False
+
+	# Using matplotlib colormap
+	elif isinstance(color, list) and len(color) >= 5 and color[0] == "colormap":
+		odata = data.get_observable(color[1], lb, plot_mode)
+		omin, omax = parse_obsrange((color[2], color[3]), obsrange)
+		cmap = get_colormap(color[4:] if len(color) > 4 else color[4])
+		colorval = apply_colormap(cmap, (np.real(odata) - omin) / (omax - omin))
+		return colorval, False
+
+	# Color-mapped from observable
+	elif isinstance(color, list) and len(color) in [4, 5] and color[0] == "obs":
+		odata = data.get_observable(color[1], lb, plot_mode)
+		omin, omax = parse_obsrange((color[2], color[3]), obsrange)
+		cfgstr = 'color_%s' % (color[4] if len(color) == 5 else 'symmobs')
+		cmap = get_colormap_from_config(cfgstr)
+		cmin, cmax = 0., 1.
+		colorval = apply_colormap(cmap, (cmax - cmin) * (np.real(odata) - omin) / (omax - omin) + cmin)
+		return colorval, False
+
+	# Color-mapped from observable (sigma)
+	elif isinstance(color, list) and len(color) == 5 and color[0] == "sigma":
+		odata1 = data.get_observable(color[1], lb, plot_mode)
+		odata2 = data.get_observable(color[2], lb, plot_mode)
+		odata = np.sqrt(np.real(odata2) - np.real(odata1)**2)
+		omax = max(abs(color[3]), abs(color[4]))
+		_, omax = parse_obsrange((0.0, omax), obsrange)
+		cmin, cmax = 0., 1.
+		cmap = get_colormap_from_config('color_sigma')
+		colorval = apply_colormap(cmap, (cmax - cmin) * np.real(odata) / omax + cmin)
+		return colorval, False
+
+	# RGB color, triplet
+	elif isinstance(color, list) and len(color) == 4 and color[0].startswith("RGB"):
+		odata = data.get_observable(color[1:], lb, plot_mode)
+		rgb_val = np.moveaxis(np.clip(np.real(odata), 0.0, 1.0), 0, -1)
+		# check normalization; this is most probably False, if we use the eight-orbital model
+		normalized = (np.max(np.abs(np.sum(rgb_val, axis = 1) - 1.0)) < 1e-3)
+		if not normalized:
+			rgb_val = do_mix_neutral(rgb_val, color[0])
+		rgb_val[np.any(np.isnan(odata), axis = 0)] = np.array([1.0, 1.0, 1.0])  # set nan values to white
+		return rgb_val, normalized
+
+	# RGB color, triplet of pairs
+	elif isinstance(color, list) and len(color) == 7 and color[0].startswith("RGB"):
+		odata = data.get_observable(color[1:], lb, plot_mode)
+		rgb_val = np.moveaxis(np.clip(np.real(odata), 0.0, 1.0), 0, -1)  # Transposition for ndim = 2
+		rgb_val = np.reshape(rgb_val, rgb_val.shape[:-1] + (3, 2))
+		rgb_val = np.sum(rgb_val, axis = -1)
+		# check normalization; this is most probably False, if we use the eight-orbital model
+		normalized = (np.max(np.abs(np.sum(rgb_val, axis = 1) - 1.0)) < 1e-3)
+		if not normalized:
+			rgb_val = do_mix_neutral(rgb_val, color[0])
+		rgb_val[np.any(np.isnan(odata), axis = 0)] = np.array([1.0, 1.0, 1.0])  # set nan values to white
+		return rgb_val, normalized
+
+	# color mix, number of pairs
+	elif isinstance(color, list) and len(color) >= 7 and (len(color) % 2) == 1 and color[0].startswith("mix"):
+		odata = data.get_observable(color[1:], lb, plot_mode)
+		ncol = (len(color) - 1) // 2
+		colorval = np.moveaxis(np.clip(np.real(odata), 0.0, 1.0), 0, -1)
+		colorval = np.reshape(colorval, colorval.shape[:-1] + (ncol, 2))
+		colorval = np.sum(colorval, axis = -1)
+		mixcolors = [(1., 0., 0.), (1., 1., 0.), (0., 1., 0.), (0., 0., 1.)] if ncol == 4 else None
+		rgb_val = do_rgb_mix(colorval, color[0], colors = mixcolors)
+		return rgb_val, True
+
+	# indexed colors
+	elif isinstance(color, list) and len(color) == 4 and color[0] == "indexed":
+		config_key = 'color_bindex' if color[1] == 'bindex' else 'color_indexed'
+		cmap = get_colormap_from_config(config_key)
+		crange, coffset = color[3] - color[2], color[2]
+		if color[1] == 'llindex':
+			if plot_mode == "index":
+				if not (isinstance(lb, tuple) and len(lb) == 2):
+					raise TypeError("Label lb is expected to be a 2-tuple")
+				colorval = indexed_colors(lb[0], cmap, color[2], color[3])
+			else:
+				ddp = data.find(*lb) if (isinstance(lb, tuple) and len(lb) == 2) else None
+				if ddp is not None and ddp.llindex is not None:
+					colorval = indexed_colors(ddp.llindex, cmap, color[2], color[3])
+				else:
+					colorval = 'b'
+					if not index_warning_raised:
+						index_warning_raised = True
+						sys.stderr.write("Warning (data_colors): Indexed observable '%s' not available for coloring\n" % color[1])
+			return colorval, False
+		elif color[1] == 'bindex':
+			if plot_mode in ["index", "index2d"]:
+				bi = lb[1] if isinstance(lb, tuple) else lb
+				colorval = indexed_colors(bi, cmap, color[2], color[3])
+			else:
+				ddp = data.find(*lb)
+				if ddp is not None and ddp.bindex is not None:
+					colorval = indexed_colors(ddp.bindex, cmap, color[2], color[3])
+				else:
+					colorval = 'b'
+					if not index_warning_raised:
+						index_warning_raised = True
+						sys.stderr.write("Warning (data_colors): Indexed observable '%s' not available for coloring\n" % color[1])
+			return colorval, False
+		else:
+			odata = data.get_observable(color[1], lb, plot_mode)
+			colorval = 'b' if odata is None else indexed_colors(np.real(odata), cmap, color[2], color[3])
+			return colorval, False
+	# dual indexed colors
+	elif isinstance(color, list) and len(color) == 5 and color[0] == "indexedpm":
+		cmap = get_colormap_from_config('color_indexedpm')
+		crange, coffset = color[4] - color[3], color[3]
+		odata2 = np.real(data.get_observable(color[2], lb, plot_mode))
+		if color[1] == 'llindex':
+			if plot_mode == "index":
+				if not (isinstance(lb, tuple) and len(lb) == 2):
+					raise TypeError("Label lb is expected to be a 2-tuple")
+				if np.nanmax(odata2) - np.nanmin(odata2) < 1e-6:  # use single colour if all values equal
+					colorval = dual_indexed_colors(lb[0], np.mean(odata2), cmap, color[3], color[4])
+				else:
+					colorval = dual_indexed_colors(np.full(len(odata2), lb[0]), odata2, cmap, color[3], color[4])
+			else:
+				ddp = data.find(*lb) if (isinstance(lb, tuple) and len(lb) == 2) else None
+				if ddp is not None and ddp.llindex is not None:
+					colorval = dual_indexed_colors(ddp.llindex, np.real(odata2), cmap, color[3], color[4])
+				else:
+					colorval = 'b'
+					if not index_warning_raised:
+						index_warning_raised = True
+						sys.stderr.write("Warning (data_colors): Indexed observable '%s' not available for coloring\n" % color[1])
+			return colorval, False
+		elif color[1] == 'bindex':
+			if plot_mode in ["index", "index2d"]:
+				bi = lb[1] if isinstance(lb, tuple) else lb
+				if np.nanmax(odata2) - np.nanmin(odata2) < 1e-6:  # use single colour if all values equal
+					colorval = dual_indexed_colors(bi, np.mean(odata2), cmap, color[3], color[4])
+				else:
+					colorval = dual_indexed_colors(np.full(len(odata2), bi), odata2, cmap, color[3], color[4])
+			else:
+				ddp = data.find(*lb)
+				if ddp is not None and ddp.bindex is not None:
+					colorval = dual_indexed_colors(ddp.bindex, odata2, cmap, color[3], color[4])
+				else:
+					colorval = 'b'
+					if not index_warning_raised:
+						index_warning_raised = True
+						sys.stderr.write("Warning (data_colors): Indexed observable '%s' not available for coloring\n" % color[1])
+			return colorval, False
+		else:
+			odata1 = np.real(data.get_observable(color[1], lb, plot_mode))
+			colorval = 'b' if odata1 is None else dual_indexed_colors(odata1, odata2, cmap, color[3], color[4])
+			return colorval, False
+	# dual shaded colors (shadedpm, shadedpmabs)
+	elif isinstance(color, list) and len(color) == 5 and color[0].startswith("shadedpm"):
+		cmap = get_colormap_from_config('color_shadedpm')
+		odata1 = data.get_observable(color[1], lb, plot_mode)
+		odata2 = data.get_observable(color[2], lb, plot_mode)
+		if color[0].endswith("abs") and odata1 is not None:
+			odata1 = np.abs(odata1)
+		colorval = 'b' if odata1 is None else dual_shaded_colors(np.real(odata1), np.real(odata2), cmap, color[3], color[4])
+		return colorval, False
+	return None, False
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/disp.py b/kdotpy-v1.0.0/src/kdotpy/ploto/disp.py
new file mode 100644
index 0000000000000000000000000000000000000000..50ce0abdf35dd5a24ddbbbe43db6ffe9dc9976c2
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/disp.py
@@ -0,0 +1,934 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from matplotlib.backends.backend_pdf import PdfPages
+from matplotlib.collections import LineCollection
+
+from ..config import get_config, get_config_bool, get_config_num
+from .colortools import data_colors, get_colormap, get_colormap_from_config
+from .tools import get_fignum, get_levels, get_plot_size, imshow_polar, plot_energies, plot_data_series, process_plot_obs
+from .tools import SpinArrows, spin_markers, get_vector_obs, get_observable_vector_scale
+from .tools import log10_scale, get_transitions_log_limits, get_transitions_quantity
+from .toolslegend import add_obs_legend, legend_extends
+from .toolstext import add_band_labels, add_char_labels, format_axis_unit, get_partext, get_title_position, set_ylabel, set_disp_axis_label, set_band_label_2d
+from .toolsticks import set_ticks, set_polar_ticks
+
+from ..etransform import ETransform
+from ..momentum import Vector, VectorGrid
+
+
+####### PLOTS #######
+
def bands_1d(
		data, filename = "", mode = None, obs = None, erange = None, xrange = None,
		legend = False, labels = True, title = None, title_pos = None,
		showplot = False, paramstr = "", addtofig = None, energies = None,
		transform = None, markers = None, plotvar = None, obsrange = None,
		**plotopts):
	"""One-dimensional band (dispersion) plot.
	This function is used typically to produce a plot of energy as function of
	momentum (k) or magnetic field (b).

	Arguments:
	data        DiagData instance that contains the data
	filename    Output filename
	mode        Plot mode
	obs         Observable id used for colouring. If None, use a default colour.
	erange      Extent of the vertical axis. If None, determine automatically.
	xrange      Extent of the horizontal axis. If None, determine automatically.
	legend      If True, add a legend.
	labels      If True, add band (character) labels.
	title       A string that sets the plot title. If None or an empty string,
	            do not show a title.
	title_pos   Position of the title (see tools.get_title_position). If None,
	            determine automatically.
	showplot    If True, show the plot on screen. If False, only save it to a
	            file.
	paramstr    Parameter string; not used by this function.
	addtofig    May be None, a matplotlib figure instance or an integer or
	            string that refers to a matplotlib figure. If None, create a new
	            matplotlib figure, otherwise draw the data into the existing
	            figure.
	energies    A dict instance with special energies. This is used to show
	            horizontal dashed lines at the Fermi energy, charge neutrality
	            point, etc.. See tools.plot_energies(). If None, do not plot
	            special energies.
	transform   An ETransform instance. This may be used to change the vertical
	            axis to a different quantity that has a one-to-one relation to
	            energy, for example integrated DOS.
	markers     A matplotlib marker string. If set, use this marker for all
	            data points. If None (recommended), determine the markers
	            automatically.
	plotvar     String that refers to a component of the variable on the
	            horizontal axis. For example, if the grid points in data are in
	            polar coordinates, one may plot as function of 'kx'. If None
	            (default), use the 'natural' variable in the data grid.
	obsrange    None or a 2-tuple. If set, this range determines the minimum and
	            maximum 'colour' value, more or less the lower and upper value
	            of the colorbar.
	**plotopts  Additional plot options, which are ignored by this function.

	Returns:
	A matplotlib figure instance, or None if the plot could not be made (e.g.,
	invalid grid dimension or missing grid variable).
	"""
	legend_ex = legend_extends(obs)
	if addtofig is None:
		# Fresh figure; reserve extra space for the legend if needed
		fig = plt.figure(get_fignum(), figsize = get_plot_size('s', legend = legend_ex))
		plt.subplots_adjust(**get_plot_size('subplot', legend = legend_ex))
		ax = fig.add_subplot(1, 1, 1)
	elif isinstance(addtofig, (int, str)):
		fig = plt.figure(addtofig)
		ax = plt.gca()
	else:
		fig = plt.figure(addtofig.number)
		ax = plt.gca()
	if len(data) == 0:
		sys.stderr.write("Warning (ploto.bands_1d): No data.\n")
		return fig

	# Determine ranges and the horizontal-axis variable (kval/kname) plus the
	# constant parameter (pval/pname) that labels this data set.
	if data.gridvar == 'k':
		vgrid = data.get_momentum_grid()
		if isinstance(vgrid, VectorGrid):
			if len(vgrid.var) == 1:
				kval, kname, pval, pname = vgrid.get_var_const()
			else:
				sys.stderr.write("ERROR (ploto.bands_1d): Invalid dimension for VectorGrid\n")
				return
			if plotvar is not None:
				try:
					kval = vgrid.get_values(plotvar)
					kname = plotvar
				except Exception:
					sys.stderr.write("Warning (ploto.bands_1d): Invalid 'plotvar'. The plot will use the default variable instead.\n")
		else:
			# Fallback without a VectorGrid: extract the component values from
			# the individual momenta.
			sys.stderr.write("Warning (ploto.bands_1d): Trying to make a plot without a VectorGrid instance\n")
			k0 = data.get_momenta()[0]
			kname = k0.components()[0]
			if plotvar is not None:
				try:
					k0.component(plotvar)
					kname = plotvar
				except Exception:
					sys.stderr.write("Warning (ploto.bands_1d): Invalid 'plotvar'. The plot will use the default variable instead.\n")
			if not kname.startswith('k'):
				kname = 'k' + kname
			kval = [k.component(kname, 'k') for k in data.get_momenta()]
			pval = None
			pname = None
	elif data.gridvar == 'b':
		vgrid = data.get_paramval()
		if isinstance(vgrid, VectorGrid):
			kval, kname, pval, pname = vgrid.get_var_const()
			if plotvar is not None:
				try:
					kval = vgrid.get_values(plotvar)
					kname = plotvar
				except Exception:
					sys.stderr.write("Warning (ploto.bands_1d): Invalid 'plotvar'. The plot will use the default variable instead.\n")
		else:
			sys.stderr.write("Warning (ploto.bands_1d): Trying to make a plot without a VectorGrid instance\n")
			k0 = vgrid[0]
			kname = k0.components()[0]
			if plotvar is not None:
				try:
					k0.component(plotvar)
					kname = plotvar
				except Exception:
					sys.stderr.write("Warning (ploto.bands_1d): Invalid 'plotvar'. The plot will use the default variable instead.\n")
			if not kname.startswith('b'):
				kname = 'b' + kname
			kval = [x.component(kname, 'b') for x in data.get_paramval()]
			pval = None
			pname = None

		# Special case: (btheta, bphi) -> btheta
		if pname == ('btheta', 'bphi') and abs(pval[1]) < 1e-6:
			pname = 'btheta'
			pval = pval[0]
	elif data.gridvar is None or data.gridvar == '':
		if len(data) == 1:
			sys.stderr.write("Warning (ploto.bands_1d): No dependence to be plotted, because data contains only one point.\n")
		else:
			sys.stderr.write("ERROR (ploto.bands_1d): Grid variable not set.\n")
		return
	else:
		raise NotImplementedError("Grid variable may be 'k' or 'b' only.")

	if isinstance(xrange, (list, tuple)) and len(xrange) == 2:
		kmin = min(xrange)
		kmax = max(xrange)
	elif xrange is None:
		kmin0 = min(kval)
		kmax0 = max(kval)
		# Optionally widen the horizontal axis by a configurable fraction
		extend_xaxis = get_config_num('fig_extend_xaxis', minval = 0)
		kmin = kmin0 - extend_xaxis * (kmax0 - kmin0) if extend_xaxis > 0 else kmin0
		kmax = kmax0 + extend_xaxis * (kmax0 - kmin0) if extend_xaxis > 0 else kmax0
	else:
		raise TypeError("Argument xrange must be a list/tuple of length 2 or None")
	if kmax == kmin:
		sys.stderr.write("Warning (ploto.bands_1d): All values on horizontal axis are equal.\n")

	if isinstance(erange, list) and len(erange) == 2:
		emin = min(erange)
		emax = max(erange)
	else:
		# Take the energy window covered by all data points (intersection of
		# the per-point eigenvalue ranges).
		emins = [min(d.eival) for d in data]
		emaxs = [max(d.eival) for d in data]
		emin = max(emins)
		emax = min(emaxs)

	xoffset = 0.0
	if "xoffset" in sys.argv:
		xoffset = 0.002 * (kmax - kmin)  # TODO: This code is not functional at this moment

	## Determine plot mode
	vec_obs = None
	if mode is None or mode == 'automatic':
		mode = 'auto'
	if mode in ['auto', 'join', 'curves', 'horizontal']:
		# Connected curves require consistent band indices across data points
		try:
			b_idx = data.check_bindex()
			if mode == 'auto':
				mode = 'curves' if b_idx else 'normal'
				if "verbose" in sys.argv:
					print("Plot mode 'auto' -> '%s'" % mode)
			elif not b_idx:
				sys.stderr.write("Warning (ploto.bands_1d): Cannot connect data points.\n")
				mode = 'normal'
		except Exception:
			b_idx = False
			sys.stderr.write("Warning (ploto.bands_1d): Exception in connecting data points.\n")
			mode = 'normal'
	else:
		b_idx = False
		vec_obs = get_vector_obs(mode)
		if vec_obs is None and mode not in ['spin', 'isopz']:
			mode = 'normal'

	## Initiate observable/color handling
	obsids = data[0].obsids
	if obs is None or obsids is None:
		color, obslabel = None, None
	else:
		color, obslabel = process_plot_obs(obsids, obs)
	normalized = None

	## Iterate over data sets
	data_labels, plot_mode = data.get_data_labels(by_index = b_idx)
	if "verbose" in sys.argv:
		print("Plotting %i data series; plot mode %s/%s" % (len(data_labels), mode, plot_mode))

	stack_by_index = get_config_bool('plot_dispersion_stack_by_index')
	for lb in data_labels:
		xdata, ydata = data.get_plot_coord(lb, plot_mode)
		if len(ydata) == 0:
			continue  # skip empty dataset

		# if plot_mode in ["momentum", "index"]:
		if kname == 'deg':
			kname = 'phi'
		# Reduce Vector x data to the scalar component being plotted
		if isinstance(xdata, Vector):
			xdata = xdata.component(kname, prefix = kname[0])
		elif len(xdata) >= 1 and isinstance(xdata[0], Vector):
			xdata = [x.component(kname, prefix = kname[0]) for x in xdata]

		# Determine markers (and possibly colours) for this data series
		if isinstance(markers, str):
			pass
		elif isinstance(markers, tuple) and len(markers) == 2:
			color, markers = markers[0], markers[1]
		elif plot_mode == "index" and mode in ['join', 'curves']:
			markers = '-'
		elif vec_obs is not None:
			# Vector plot: draw arrows from a pair of observables
			if vec_obs[0] in obsids and vec_obs[1] in obsids:
				o1data = np.real(data.get_observable(vec_obs[0], lb, plot_mode))
				o2data = np.real(data.get_observable(vec_obs[1], lb, plot_mode))
				vec_scale = get_observable_vector_scale(vec_obs)
				markers = SpinArrows(o1data, o2data, scale = vec_scale, maxlen = 1.0, unit_marker = (mode[-1] == '1'))
			else:
				sys.stderr.write("Warning (ploto.bands_1d): Observables '%s' and '%s' not available for vector plot.\n" % vec_obs)
		elif mode == "spin":  # (total) 'spin' Jz
			oszdata = np.real(data.get_observable('jz', lb, plot_mode))
			markers = None if oszdata is None else spin_markers(oszdata)
		elif mode == "isopz":  # isoparity
			oszdata = np.real(data.get_observable('isopz', lb, plot_mode))
			markers = None if oszdata is None else spin_markers(oszdata, 0.01, 0.99)
		else:
			markers = None

		# Get color data
		colorval, normalized = data_colors(data, color, lb, plot_mode, obsrange = obsrange)

		# Set stacking order (zorder value)
		zorder = None
		if stack_by_index and isinstance(lb, tuple) and len(lb) == 2 and isinstance(lb[0], (int, np.integer)):  # lowest LL on top
			zorder = 1 + 1 / (lb[0] + 3)
		elif isinstance(lb, (int, np.integer)):  # lowest band index on top
			zorder = 1 + 1 / (abs(lb) + 1)  # lb should not be 0, but be prepared for it
		# Plot this data series
		plot_data_series(xdata, ydata, colors = colorval, markers = markers, yrange = [emin, emax], transform = transform, zorder = zorder)

	if legend:
		add_obs_legend(color, normalized = normalized, obslabel = obslabel, obsrange = obsrange, filename = filename)

	# Plot band labels
	if labels:
		bandchar = data.get_all_char()
		yrange = transform.plotrange if transform is not None else (emin, emax)
		if bandchar is not None:
			add_char_labels(bandchar, xrange = (kmin, kmax), yrange = yrange, transform = transform, box = False, size=8)
		else:
			# No character labels available; fall back to band-index labels at
			# the base point.
			data0 = data.get_base_point()
			if data0 is not None:
				add_band_labels(data0.eival, data0.bindex, data0.llindex, xrange = (kmin, kmax), yrange = yrange, transform = transform, box = False, size=8)

	# Determine data range and axis labels
	if transform is None:
		plt.axis([kmin, kmax, emin, emax])
		set_ylabel("$E$", "$\\mathrm{meV}$")
	elif isinstance(transform, ETransform):
		if transform.plotrange is not None:
			plt.axis([kmin, kmax, transform.plotrange[0], transform.plotrange[1]])
		else:
			ymin = transform.min(emin)
			ymax = transform.max(emax)
			plt.axis([kmin, kmax, ymin, ymax])
		if transform.qstr is not None and transform.ustr is not None:
			set_ylabel(transform.qstr, transform.ustr)
		else:
			set_ylabel("transformed $E$", "a.u.")
	else:
		raise TypeError("Argument transform must be an ETransform instance")
	set_disp_axis_label(kname, set_x = True)

	# Plot Fermi energy and co
	if energies is not None and get_config_bool('plot_dispersion_energies'):
		plot_energies({e: energies[e] for e in energies if e != 'e0'}, xval = [kmin, kmax], transform = transform)

	# Plot charge neutral point
	if get_config_bool('plot_ecnp'):
		xdata = data.get_xval()
		if isinstance(xdata, Vector):
			xdata = xdata.component(kname, prefix = kname[0])
		elif len(xdata) >= 1 and isinstance(xdata[0], Vector):
			xdata = [x.component(kname, prefix = kname[0]) for x in xdata]
		try:
			ecnp = data.get_cnp()
		except ValueError:
			sys.stderr.write("Warning (ploto.bands_1d): Cannot plot E_CNP, because universal band indices are missing.\n")
		else:
			cnp_color = get_config('plot_dispersion_energies_color')
			if cnp_color == '':
				cnp_color = rcParams['lines.color']
			plt.plot(xdata, ecnp, ':', lw=1, color=cnp_color)
			cnptext_x, cnptext_y = xdata[-1], ecnp[-1] - 0.02 * (emax - emin)
			plt.text(cnptext_x, cnptext_y, r"$E_\mathrm{CNP}$", ha="right", va="top")

	# Ticks; use degree ticks for angular variables
	set_ticks(ax, xdegrees = kname.endswith("phi") or kname.endswith("theta"))

	# Plot title
	txty = None
	if (title is not None) and (title != ""):
		txtx, txty, txtha, txtva = get_title_position(title_pos)
		ax.text(txtx, txty, title, ha = txtha, va = txtva, transform = ax.transAxes)

	# Constant-parameter text (do not include if we add new data to existing figure)
	partext = get_partext(pval, pname)
	if get_config_bool("plot_dispersion_parameter_text") and partext != "" and addtofig is None:
		# Shift down if the title occupies the top of the axes
		partext_y = 0.92 if txty is not None and txty >= 0.95 else 0.98
		ax.text(0.03, partext_y, partext, ha='left', va='top', transform=ax.transAxes)

	if filename != "" and filename is not None:
		plt.savefig(filename)
	if showplot:
		plt.show()
	return fig
+
def add_bhz(data, filename = "", showplot = False, title = None, title_pos = None, k0 = None):
	"""Add BHZ dispersion to an existing dispersion plot.

	Arguments:
	data        DiagData instance that contains the data from diagonalization of
	            the BHZ Hamiltonian. (Not the original k.p Hamiltonian.)
	filename    Output filename
	showplot    If True, show plot on screen.
	title       A string that sets the plot title. If None or an empty string,
	            do not show a title.
	title_pos   Position of the title (see tools.get_title_position). If None,
	            determine automatically.
	k0          If set, a momentum value; the eigenvalues at the matching data
	            point (if found) are marked with red dots.

	Returns:
	A matplotlib figure instance. (The current figure.)
	"""
	# Draw into whatever figure/axes are currently active
	fig = plt.gcf()
	ax = plt.gca()

	# kval1 = data.get_momenta()
	kval, kname, pval, pname = data.grid.get_var_const()
	eival = np.array([d.eival for d in data])  # shape: (n_k, n_bands)
	eivec = np.array([d.eivec for d in data])
	na = eival.shape[1]  # number of bands

	# Line styles/colours per block code: index 0 = mixed, 1 = upper block,
	# 2 = lower block. Defaults below may be overridden by configuration.
	blockstyles = ['-', '--', ':']
	blockcolors = ['r', 'r', 'r']
	blockcolor_cfg = get_config('bhz_plotcolor')
	try:
		colval = blockcolor_cfg.split(',')
	except:
		sys.stderr.write("ERROR (ploto.add_bhz): Invalid configuration value for 'bhz_plotcolor'\n")
	else:
		# 1 value: all blocks; 2 values: the two pure blocks (mixed stays 'r');
		# 3 values: pure blocks first, then mixed.
		if len(colval) == 1:
			blockcolors = [colval[0], colval[0], colval[0]]
		elif len(colval) == 2:
			blockcolors = ['r', colval[0], colval[1]]
		elif len(colval) == 3:
			blockcolors = [colval[2], colval[0], colval[1]]
		else:
			sys.stderr.write("ERROR (ploto.add_bhz): Invalid configuration value for 'bhz_plotcolor'\n")
	blockstyle_cfg = get_config('bhz_plotstyle')
	try:
		styval = blockstyle_cfg.split(',')
	except:
		sys.stderr.write("ERROR (ploto.add_bhz): Invalid configuration value for 'bhz_plotstyle'\n")
	else:
		# Same 1/2/3-value convention as for 'bhz_plotcolor' above
		if len(styval) == 1:
			blockstyles = [styval[0], styval[0], styval[0]]
		elif len(styval) == 2:
			blockstyles = ['-', styval[0], styval[1]]
		elif len(styval) == 3:
			blockstyles = [styval[2], styval[0], styval[1]]
		else:
			sys.stderr.write("ERROR (ploto.add_bhz): Invalid configuration value for 'bhz_plotstyle'\n")

	# For each band, walk along the momentum axis and split the curve into
	# segments according to which BHZ block the eigenvector lives in; draw each
	# finished segment with the style/colour of the block it belonged to.
	for b in range(0, na):
		# determine whether the eigenvector lives in a specific block
		ki0 = 0  # start index of the current segment
		block = None  # current block code: None (unset), 0 mixed, 1 upper, 2 lower
		for ki, eivecsk in enumerate(eivec):
			# Split the eigenvector into the two halves (blocks) and compute
			# the weight (squared norm) in each
			eivec1, eivec2 = eivecsk[:na//2, b], eivecsk[na//2:, b]
			norm1, norm2 = np.real(np.vdot(eivec1, eivec1)), np.real(np.vdot(eivec2, eivec2))
			if norm1 >= 0.99 and norm2 <= 0.01:
				# (Almost) purely in the first block; on a block change, draw
				# the previous segment and start a new one slightly overlapping
				if block is None:
					block = 1
				elif block in [0, 2]:
					plt.plot(kval[ki0:ki], eival[ki0:ki, b], ls = blockstyles[block], color = blockcolors[block])
					ki0 = max(ki - 1, 0)
					block = 1
			elif norm2 >= 0.99 and norm1 <= 0.01:
				# (Almost) purely in the second block
				if block is None:
					block = 2
				elif block in [0, 1]:
					plt.plot(kval[ki0:ki], eival[ki0:ki, b], ls = blockstyles[block], color = blockcolors[block])
					ki0 = max(ki - 1, 0)
					block = 2
			else:
				# Mixed between the two blocks
				if block is None:
					block = 0
				elif block in [1, 2]:
					plt.plot(kval[ki0:ki], eival[ki0:ki, b], ls = blockstyles[block], color = blockcolors[block])
					ki0 = max(ki - 1, 0)
					block = 0
		# Draw the remaining (last) segment of this band
		plt.plot(kval[ki0:], eival[ki0:, b], ls = blockstyles[block], color = blockcolors[block])

		# plt.plot(kval, eival[:,b], 'k-', color = "k" if color is None else color)

	# Mark the eigenvalues at momentum k0 (if requested and found) with red dots
	if k0 is not None:
		ddp0, idx0 = data.find(k0, return_index = True)
		if ddp0 is not None:
			plt.plot(np.full(na, kval[idx0]), ddp0.eival, 'ro')

	if (title is not None) and (title != ""):
		txtx, txty, txtha, txtva = get_title_position(title_pos)
		ax.text(txtx, txty, title, ha = txtha, va = txtva, transform = ax.transAxes)
	if filename != "" and filename is not None:
		plt.savefig(filename)
	if showplot:
		plt.show()
	return fig
+
def add_transitions(data, filename = "", fig = None, showplot = False, color = None, title = None, title_pos = None, maxnum = None, plotvar = None, **plotopts):
	"""Add transitions to an existing dispersion plot.
	This function draws vertical bars corresponding to optical transitions. The
	colours correspond to the transition amplitudes, like in the transitions
	plot, see auxil.transitions().

	Arguments:
	data        DiagData instance, whose DiagDataPoint members contain
	            TransitionsData members (ddp.transitions is not None).
	filename    Output filename
	fig         None, an integer or matplotlib figure instance. The figure in
	            which the transitions are drawn. If None, use the current
	            figure.
	showplot    If True, show plot on screen.
	color       NOT USED
	title       A string that sets the plot title. If None or an empty string,
	            do not show a title.
	title_pos   Position of the title (see tools.get_title_position). If None,
	            determine automatically.
	maxnum      If an integer, draw this many of the highest-amplitude
	            transitions at each data point. If None, draw all.
	plotvar     String that refers to a component of the variable on the
	            horizontal axis (requires the data grid to be a VectorGrid).
	            If None (default), use the 'natural' variable in the data grid.
	**plotopts  Additional plot options, which are ignored by this function.

	Returns:
	A matplotlib figure instance.
	"""
	if fig is None:
		fig = plt.gcf()
	elif isinstance(fig, int):
		plt.figure(fig)
	else:
		plt.figure(fig.number)
	ax = fig.gca()
	cmap = get_colormap(["hot_r", "Blues"])
	markersize = rcParams['lines.markersize']

	# Determine ranges and the horizontal-axis variable
	if data.gridvar == 'k':
		vgrid = data.get_momentum_grid()
		if isinstance(vgrid, VectorGrid):
			if len(vgrid.var) == 1:
				kval, kname, pval, pname = vgrid.get_var_const()
			else:
				sys.stderr.write("ERROR (ploto.add_transitions): Invalid dimension for VectorGrid\n")
				return
		else:
			sys.stderr.write("ERROR (ploto.add_transitions): Data must include a VectorGrid instance\n")
			return
	elif data.gridvar == 'b':
		vgrid = data.get_paramval()
		if isinstance(vgrid, VectorGrid):
			kval, kname, pval, pname = vgrid.get_var_const()
		else:
			sys.stderr.write("ERROR (ploto.add_transitions): Data must include a VectorGrid instance\n")
			return
		# Special case: (btheta, bphi) -> btheta
		if pname == ('btheta', 'bphi') and abs(pval[1]) < 1e-6:
			pname = 'btheta'
			pval = pval[0]
	else:
		raise NotImplementedError("Grid variable may be 'k' or 'b' only.")

	if plotvar is not None:
		if not isinstance(vgrid, VectorGrid):
			sys.stderr.write("Warning (ploto.add_transitions): Option 'plotvar' not supported if input variables are not in VectorGrid format.\n")
		else:
			try:
				kval = vgrid.get_values(plotvar)
				kname = plotvar
			except:
				sys.stderr.write("Warning (ploto.add_transitions): Invalid 'plotvar'. The plot will use the default variable instead.\n")

	qty = get_transitions_quantity()
	# Determine color scale (maximum value)
	qmin, qmax = get_transitions_log_limits(data, qty = qty)

	# For each data point, draw the (up to maxnum) strongest transitions as
	# vertical bars between the two energies, with colour and line width
	# derived from the log-scaled amplitude.
	for k, d in zip(kval, data):
		td = d.transitions
		if td is not None and td.n > 0:
			amp = td.get_values(qty)
			# Map amplitudes to [0, 1] on a logarithmic scale
			q = log10_scale(amp, qmin, qmax)
			order = np.argsort(-amp)  # strongest transitions first
			if maxnum is not None and td.n > maxnum:
				order = order[:maxnum]
				n = maxnum
			else:
				n = td.n
			# Nonlinear mappings emphasize the strongest transitions
			colorval = cmap(0.1 + 0.9 * (q[order])**3)
			sizes = (0.02 + 0.98 * (q[order])**2)
			# for e1, e2, de, q1 in zip(td.energies[order,0], td.energies[order,1], td.delta_e()[order], q[order]):
			# 	print ("%10s %7.3f %7.3f %7.3f %5.3f" % (k, e1, e2, de, q1))
			xval = np.full(n, k)
			yval1 = np.amin(td.energies[order], axis = 1)  # lower energy of each pair
			yval2 = np.amax(td.energies[order], axis = 1)  # upper energy of each pair
			# End markers ('_') at both energies of each transition
			ax.scatter(xval, yval1, c = colorval, s = sizes * markersize**2, marker = '_', zorder = 22)
			ax.scatter(xval, yval2, c = colorval, s = sizes * markersize**2, marker = '_', zorder = 22)

			# Build one line segment per transition, connecting the two energies
			xy1 = np.array((xval, yval1))
			xy2 = np.array((xval, yval2))
			lines = np.array((xy1, xy2)).transpose((2, 0, 1))

			coll = LineCollection(lines, colors = colorval, linewidths = 2.0 * sizes)
			# coll.set_zorder(list(23 + td.amplitudes[order]))
			coll.set_zorder(23)  # draw above the dispersion curves and end markers
			ax.add_collection(coll)

	if (title is not None) and (title != ""):
		txtx, txty, txtha, txtva = get_title_position(title_pos)
		ax.text(txtx, txty, title, ha = txtha, va = txtva, transform = ax.transAxes)
	if filename != "" and filename is not None:
		plt.savefig(filename)
	if showplot:
		plt.show()
	return fig
+
def bands_2d(data, filename = "", mode = None, obs = None, erange = None, krange = None, legend = False, labels = True, title = None, title_pos = None, showplot = False, paramstr = "", addtofig = None, energies = None, transform = None, markers = None, extrema = None, plotvar = None, obsrange = None, **plotopts):
	"""Draw a contour or density plot of the band dispersion as function of two variables.
	The plot can be cartesian or polar. The result is a set of figures, one for
	each band.

	Arguments:
	data        DiagData instance that contains the data
	filename    Output filename
	mode        Plot mode (string)
	obs         Observable id used for colouring. If None, use the band energy
	            as the colour scale.
	erange      Energy range that determines which bands are drawn. If set (as a
	            2-tuple), then bands whose energy lies completely out of this
	            range are not drawn.
	krange      NOT USED
	legend      If True, add a legend.
	labels      If True, add band (character) labels. For this type of plots,
	            these are band labels printed in the upper left corner.
	title       A string that sets the plot title. If None or an empty string,
	            do not show a title.
	title_pos   Position of the title (see tools.get_title_position). If None,
	            determine automatically.
	showplot    NOT USED
	paramstr    NOT USED
	addtofig    NOT USED
	energies    A dict instance with special energies, see tools.plot_energies()
	            for more information. In these plots, the special energies are
	            drawn as contours. If None, do not draw special energy contours.
	transform   NOT USED
	markers     NOT USED
	extrema     A dict instance or None. The dict keys are the band labels and
	            the values should be a list of BandExtremum instances. If set,
	            then draw the minima and maxima in the plot as green or red
	            (respectively) numbers that indicate the extremal energy, at the
	            appropriate positions. If None, do not show these.
	plotvar     NOT USED
	obsrange    None or a 2-tuple. If set, this range determines the minimum and
	            maximum 'colour' value, more or less the lower and upper value
	            of the colorbar.
	**plotopts  Additional plot options, which are ignored by this function.

	Returns:
	A matplotlib figure instance.
	"""
	## Initiate observable/color handling
	obsids = data[0].obsids
	if obs is None or obsids is None or obs == "energy":
		color, obslabel = None, None
	else:
		color, obslabel = process_plot_obs(obsids, obs)
	normalized = None
	if isinstance(color, list) and len(color) > 1 and color[0] in ["RGB", "mix"]:
		color[0] += "W"
	vec_obs = get_vector_obs(mode)  # get vector observables if applicable

	## Iterate over data sets
	data_labels, plot_mode = data.get_data_labels(by_index = True)
	if "verbose" in sys.argv:
		print("Plotting %i data series; plot mode %s/%s" % (0 if data_labels is None else len(data_labels), mode, plot_mode))
	if plot_mode != "index":
		sys.stderr.write("Warning (ploto.bands_2d): Plot mode 'index' is required, but not available\n")
		return
	if data_labels is None or data_labels == []:
		sys.stderr.write("Warning (ploto.bands_2d): Data labels are required, but are not available.\n")
		return

	# Select the bands (data labels) that have data and intersect the energy range
	data_labels_sel = []
	for lb in data_labels:
		kdata, zdata = data.get_plot_coord(lb, "index2d")
		if np.all(np.isnan(zdata)):
			continue

		zmin, zmax = np.nanmin(zdata), np.nanmax(zdata)

		# do not plot bands that lie completely outside the energy range
		if erange is not None:
			if erange[0] is not None and zmax < erange[0]:
				continue
			if erange[1] is not None and zmin > erange[1]:
				continue
		data_labels_sel.append(lb)
	if len(data_labels_sel) == 0:
		sys.stderr.write("Warning (ploto.bands_2d): No data within energy range.\n")
		return

	data_k0 = data.get_base_point()
	pdfpages = PdfPages(filename) if filename is not None and filename != "" else None
	cmap_energy = get_colormap_from_config('color_energy')
	for lb in data_labels_sel:
		kdata, zdata = data.get_plot_coord(lb, "index2d")
		zmin, zmax = np.nanmin(zdata), np.nanmax(zdata)

		# extract coordinates (kx, ky) or (k, kphi)
		polar = False
		degrees = None

		if data.grid is not None and data.grid.prefix.startswith('k'):
			gridval, gridvar, constval, const = data.grid.get_var_const()
			kxval, kyval = data.grid.get_grid(['r' if v == 'k' else v.lstrip('k') for v in gridvar])
			polar = (gridvar in [("k", "kphi"), ("k", "ktheta")])
			degrees = data.grid.degrees  # also necessary if not polar
		elif isinstance(kdata[0][0], Vector):
			kxval = np.array([[k.to_tuple()[0] for k in kk] for kk in kdata])
			kyval = np.array([[k.to_tuple()[1] for k in kk] for kk in kdata])
			if kdata[0][0].vtype in ['pol', 'cyl', 'sph']:
				polar = True
				degrees = kdata[0][0].degrees
				gridvar = ('k', 'kphi')  # TODO: Spherical
			else:
				gridvar = ('kx', 'ky')
		else:
			kxval = np.array([[k[0] for k in kk] for kk in kdata])
			kyval = np.array([[k[1] for k in kk] for kk in kdata])
			if isinstance(kdata[0][0], tuple) and len(kdata[0][0]) == 3 and kdata[0][0][2] in ["deg", "phi", "kphi", "rad"]:
				polar = True
				degrees = (kdata[0][0][2] == 'deg')
				gridvar = ('k', 'kphi')
			else:
				gridvar = ('kx', 'ky')
		# Bug fix: the second condition previously re-tested kxval, so a
		# degenerate ky axis was not caught.
		if min(kxval.shape) <= 1 or min(kyval.shape) <= 1:
			sys.stderr.write("ERROR (ploto.bands_2d): Insufficient data (length <= 1) in at least one axis.\n")
			return

		# Get energy range and define contours
		contour_thicknesses = [0.5, 1.5]
		elevelsf, elevels, ethickness, elevelfmt = get_levels(zmin, zmax, thicknesses = contour_thicknesses)
		if erange is not None and erange[0] is not None and erange[1] is not None:
			emin, emax = tuple(erange)
		else:
			emin, emax = min(elevelsf), max(elevelsf)

		legend_ex = legend_extends(obs)
		fig = plt.figure(get_fignum(), figsize = get_plot_size('s', legend = legend_ex))
		# adjust so that the plot area is square
		fig_width = get_plot_size('pw', inches = False, legend = legend_ex)
		fig_height = get_plot_size('ph', inches = False, legend = legend_ex)
		fig_size = min(fig_width, fig_height)
		hsize = get_plot_size('h', inches = False, legend = legend_ex)
		vsize = get_plot_size('v', inches = False, legend = legend_ex)
		margin_l = get_plot_size('ml', inches = False, legend = legend_ex)
		margin_r = get_plot_size('mr', inches = False, legend = legend_ex)
		margin_b = get_plot_size('mb', inches = False, legend = legend_ex)
		margin_t = get_plot_size('mt', inches = False, legend = legend_ex)
		# For polar plots, centre the axes vertically; otherwise respect margins
		margin_b_1 = 0.5 * (vsize - fig_size) if polar else vsize - margin_t - fig_size

		if polar:
			ax = fig.add_axes([margin_l / hsize, margin_b_1 / vsize, fig_size / hsize, fig_size / vsize], projection = 'polar')
			if degrees:
				kyval = np.radians(kyval)

			# Colors
			if color is None:
				ax.contourf(kyval, kxval, zdata, elevelsf, cmap = cmap_energy, vmin = emin, vmax = emax)
			else:
				colorval, normalized = data_colors(data, color, lb, "index2d", obsrange = obsrange)
				if isinstance(colorval, np.ndarray) and colorval.ndim == 3:
					imshow_polar(kyval, kxval, colorval, interpolation = 'bilinear')
				# else:
				# 	ax.set_facecolor(colorval)

			# Contours
			contours = ax.contour(kyval, kxval, zdata, elevels, levels = elevels, linewidths = ethickness, colors = 'k', linestyles = 'solid', zorder = 3)

			# Ticks and grid
			set_polar_ticks(kxval, kyval)

			if extrema is not None and lb in extrema:
				for ex in extrema[lb]:
					r, theta = ex.k.polar(deg = False, fold = False)
					# Format the extremal energy with the same precision as the
					# contour labels.
					txt = elevelfmt % ex.energy
					if r < 0.0:
						r = abs(r)
						theta += np.pi
					ax.text(theta, r, txt, fontsize=6, color='r' if ex.minmax == 'min' else '#00cf00', ha = 'center', va= 'center', zorder = 6)

			if vec_obs is not None:
				if vec_obs[0] in obsids and vec_obs[1] in obsids:
					o1data = np.real(data.get_observable(vec_obs[0], lb, plot_mode))
					o2data = np.real(data.get_observable(vec_obs[1], lb, plot_mode))
					vec_scale = get_observable_vector_scale(vec_obs)
					markers = SpinArrows(o1data, o2data, scale = vec_scale, maxlen = 1.0, unit_marker = (mode[-1] == '1'))
					markers.plot(kxval, kyval, polar = True, rmin = 0.1 * kxval.max(), zorder = 7)
				else:
					sys.stderr.write("Warning (ploto.bands_2d): Observables '%s' and '%s' not available for vector plot.\n" % vec_obs)

		else:
			ax = fig.add_axes([margin_l / hsize, margin_b_1 / vsize, fig_size / hsize, fig_size / vsize])
			# Colors
			if color is None:
				ax.contourf(kxval, kyval, zdata, elevelsf, cmap = cmap_energy, vmin = emin, vmax = emax)
			else:
				dkx, dky = kxval[0, 1] - kxval[0, 0], kyval[1, 0] - kyval[0, 0]
				extent = [kxval.min() - 0.5 * dkx, kxval.max() + 0.5 * dkx, kyval.min() - 0.5 * dky, kyval.max() + 0.5 * dky]
				colorval, normalized = data_colors(data, color, lb, "index2d", obsrange = obsrange)
				if isinstance(colorval, np.ndarray) and colorval.ndim == 3:
					ax.imshow(np.clip(colorval.transpose(1, 0, 2), 0, 1), extent = extent, origin = 'lower', aspect = 'auto')
					# The transposition is necessary due to the way in which imshow accepts coordinates
				# else:
				# 	ax.set_facecolor(colorval)
			ax.set_xlim([kxval.min(), kxval.max()])
			ax.set_ylim([kyval.min(), kyval.max()])
			set_ticks(ax)
			set_disp_axis_label(gridvar[0], set_x = True)
			set_disp_axis_label(gridvar[1], set_y = True)

			# Contours
			contours = ax.contour(kxval, kyval, zdata, elevels, levels = elevels, linewidths = ethickness, colors = 'k', linestyles = 'solid', zorder = 3)

			if extrema is not None and lb in extrema:
				for ex in extrema[lb]:
					kx, ky = (ex.k.component(c, prefix = 'k') for c in gridvar)  # possibly (kx, kz) or (ky, kz)
					# Format the extremal energy with the same precision as the
					# contour labels.
					txt = elevelfmt % ex.energy
					ax.text(kx, ky, txt, fontsize=6, color='r' if ex.minmax == 'min' else '#00cf00', ha = 'center', va= 'center', zorder = 6)

			if vec_obs is not None:
				if vec_obs[0] in obsids and vec_obs[1] in obsids:
					o1data = np.real(data.get_observable(vec_obs[0], lb, plot_mode))
					o2data = np.real(data.get_observable(vec_obs[1], lb, plot_mode))
					vec_scale = get_observable_vector_scale(vec_obs)
					markers = SpinArrows(o1data, o2data, scale = vec_scale, maxlen = 1.0, unit_marker = (mode[-1] == '1'))
					kmax = max(np.amax(kxval), np.amax(kyval))
					markers.plot(kxval, kyval, polar = False, rmin = 0.099 * kmax, zorder = 7)
				else:
					sys.stderr.write("Warning (ploto.bands_2d): Observables '%s' and '%s' not available for vector plot.\n" % vec_obs)

		# Label only the 'thick' contours
		labeled_levels = [lvl for lvl, th in zip(elevels, ethickness) if th == contour_thicknesses[1]]
		if len(labeled_levels) >= 1:
			try:
				plt.clabel(contours, labeled_levels, inline=True, fmt=elevelfmt, fontsize=6)
			except IndexError:
				sys.stderr.write(
					"ERROR (ploto.bands_2d): Labelling the contours has failed. " +
					"This is probably due to a bug in Matplotlib. " +
					"Sometimes, re-running kdotpy may resolve it.\n")
				# plt.clabel sometimes throws the following IndexError
				# exception. The occurrence is seemingly random, because simply
				# running kdotpy again (with the same arguments) can resolve it.
				# This is probably a bug in Matplotlib or one of its
				# dependencies.
				#
				# File ".../python3.12/site-packages/matplotlib/contour.py", line 379, in _split_path_and_get_label_rotation
				#     start = movetos[movetos <= idx][-1]
				#             ~~~~~~~~~~~~~~~~~~~~~~~^^^^
				# IndexError: index -1 is out of bounds for axis 0 with size 0

		# Plot special energies
		if energies is not None and get_config_bool('plot_dispersion_energies'):
			if polar:
				k1val, k2val = kyval, kxval
			else:
				k1val, k2val = kxval, kyval
			ef0 = energies['ef0'] if 'ef0' in energies else energies.get('mu0')
			ef = energies['ef'] if 'ef' in energies else energies.get('mu')
			ef0_tex = r'$E_{\mathrm{F},0}$' if 'ef0' in energies else r'$\mu_0$'
			ef_tex = r'$E_{\mathrm{F}}$' if 'ef' in energies else r'$\mu$'

			if ef is not None and ef0 is not None and ef - ef0 <= -1.0:
				econtour = ax.contour(k1val, k2val, zdata, [ef, ef0], linewidths = 1.5, colors = 'r', linestyles = ('dashed', 'dotted'), zorder = 4)
				ax.contourf(k1val, k2val, zdata, [ef, ef0], colors='none', vmin=emin, vmax= emax, hatches = [None, 'oo', None], extend = 'both', zorder=3)
				econtourtext = {ef: ef_tex, ef0: ef0_tex}
			elif ef is not None and ef0 is not None and ef - ef0 >= 1.0:
				econtour = ax.contour(k1val, k2val, zdata, [ef0, ef], linewidths = 1.5, colors = 'r', linestyles = ('dotted', 'dashed'), zorder = 4)
				ax.contourf(k1val, k2val, zdata, [ef0, ef], colors='none', vmin=emin, vmax= emax, hatches = [None, '..', None], extend = 'both', zorder=3)
				econtourtext = {ef: ef_tex, ef0: ef0_tex}
			elif ef is not None:
				econtour = ax.contour(k1val, k2val, zdata, [ef], linewidths = 1.5, colors = 'r', linestyles = 'dashed', zorder = 4)
				econtourtext = {ef: ef_tex}
			elif ef0 is not None:
				econtour = ax.contour(k1val, k2val, zdata, [ef0], linewidths = 1.5, colors = 'r', linestyles = 'dotted', zorder = 4)
				econtourtext = {ef0: ef0_tex}
			else:
				econtour = None
			if econtour is not None:
				plt.clabel(econtour, inline=True, fmt=econtourtext, fontsize=6)

		# Set additional axis for 'auxiliary' plot elements (legends and such)
		ax_legend = fig.add_axes([margin_l / hsize, margin_b / vsize, 1.0 - (margin_l + margin_r) / hsize, 1.0 - (margin_t + margin_b) / vsize])
		ax_legend.axis("off")
		if polar:
			anglevar = gridvar[1][1:] if gridvar[1] in ['kphi', 'ktheta'] else 'phi'
			ax_legend.text(0.7, 0.93, "$(k\\,\\cos\\,\\%s,k\\,\\sin\\,\\%s)$" % (anglevar, anglevar), ha = 'right', va = 'baseline', transform = fig.transFigure)
			ax_legend.text(0.7, 0.88, format_axis_unit(r"$\mathrm{nm}^{-1}$"), ha = 'right', va = 'baseline', transform = fig.transFigure)

		# Character/band label
		char = None
		if data_k0 is not None:
			char = None if data_k0.char is None else data_k0.get_char((lb,))
			if char is not None and char != '??':
				set_band_label_2d(char, axis = ax_legend)
				# ax_legend.text(0.15, 0.93, txt + " ", ha = 'left', va = 'baseline', transform = fig.transFigure)
		if (char is None or char == '??') and isinstance(lb, (int, np.integer)) and lb != 0:
			txt = ("$+%i$" % lb) if lb > 0 else ("$-%i$" % -lb)
			set_band_label_2d(txt, axis = ax_legend)

		# Colorbar/legend
		if legend:
			if color is None:
				add_obs_legend(["colormap", "energy", emin, emax, cmap_energy], obslabel = ("$E$", "$\\mathrm{meV}$"), filename = filename)  # ignore obsrange, use erange instead
			else:
				add_obs_legend(color, normalized = normalized, obslabel = obslabel, obsrange = obsrange, narrow = True, filename = filename)

		# Plot title
		if (title is not None) and (title != ""):
			txtx, txty, txtha, txtva = get_title_position(title_pos)
			ax.text(txtx, txty, title, ha = txtha, va = txtva, transform = ax.transAxes)

		if pdfpages is None:
			plt.savefig("band-%s.pdf" % str(lb).replace("-", "m"))
		else:
			pdfpages.savefig(fig)
		plt.close()

	if pdfpages is not None:
		pdfpages.close()
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/dos.py b/kdotpy-v1.0.0/src/kdotpy/ploto/dos.py
new file mode 100644
index 0000000000000000000000000000000000000000..17d096c1c63f55db7658e4ff8d301cbcc42ff834
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/dos.py
@@ -0,0 +1,1256 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import warnings
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from matplotlib.backends.backend_pdf import PdfPages
+from matplotlib.patches import Rectangle
+
+from ..config import get_config, get_config_bool, get_config_int, get_config_num
+from .colortools import get_colormap, indexed_color_auto_range
+from .tools import get_fignum, get_plot_size, plot_energies
+from .toolslegend import add_colorbar, get_legend_file
+from .toolstext import format_axis_unit, set_xlabel, set_ylabel
+from .toolsticks import add_frequency_ticks, set_ticks
+from ..phystext import format_value
+
+from ..density import DensityScale, DensityData
+from ..physconst import eoverhbar
+from ..momentum import Vector, VectorGrid
+
+
def is_range_uniform(val):
	"""Return True if val is a uniform (linear) progression.

	A range is uniform when its second discrete difference vanishes (within a
	tolerance of 1e-10). Ranges of fewer than three elements are trivially
	uniform.
	"""
	if len(val) < 3:
		return True
	second_diff = np.diff(val, n = 2)
	return np.amax(np.abs(second_diff)) < 1e-10
+
def extend_by_one(arr):
	"""Return a copy of arr that is one element longer.

	The endpoints are kept; the interior is replaced by the midpoints of
	adjacent pairs, i.e. [arr[0], (arr[i] + arr[i+1]) / 2, ..., arr[-1]].
	"""
	arr = np.asarray(arr)
	midpoints = 0.5 * (arr[:-1] + arr[1:])
	return np.concatenate(([arr[0]], midpoints, [arr[-1]]))
+
def shorten_by_one(arr):
	"""Return the midpoints of adjacent elements of arr.

	The result, [(arr[i] + arr[i+1]) / 2], is one element shorter than arr.
	"""
	arr = np.asarray(arr)
	return 0.5 * (arr[:-1] + arr[1:])
+
def add_contours(xval, yval, zval, levels, xminmax = None, linewidths = None):
	"""Add contours to current figure.
	Used to add contours of equal integrated DOS to a plot of (local)
	integrated DOS.

	Arguments:
	xval, yval   Grid variables
	zval         Data array. Axis 0 corresponds to xval, axis 1 to yval; the
	             array is transposed before being passed to plt.contour().
	levels       z values at which contours are to be drawn.
	xminmax      None or a 2-tuple. Horizontal extent of the plot. If None,
	             determine automatically.
	linewidths   A list of the same length as levels. The line widths of the
	             individual contours. If None, use default value for all
	             contours.
	"""
	# Draw negative contours solid, not dashed (matplotlib default)
	rcParams['contour.negative_linestyle'] = 'solid'
	levels = np.asarray(levels)
	if xminmax is None:
		xmin0, xmax0 = min(xval), max(xval)
	else:
		xmin0, xmax0 = tuple(xminmax)
	# For a non-uniform x axis, explicit coordinate grids are required;
	# otherwise the extent argument suffices.
	if not is_range_uniform(xval):
		xx1, yy1 = np.meshgrid(xval, yval)
		contours = plt.contour(xx1, yy1, zval.transpose(), levels, origin = 'lower', extent = (xmin0, xmax0, min(yval), max(yval)), linewidths = 0.75, colors='k')  # removed: aspect = 'auto'
	else:
		contours = plt.contour(zval.transpose(), levels, origin = 'lower', extent = (xmin0, xmax0, min(yval), max(yval)), linewidths = 0.75, colors='k')  # removed: aspect = 'auto'

	# Apply per-contour line widths, if requested.
	if isinstance(linewidths, (list, np.ndarray)):
		if len(linewidths) != len(levels):
			raise ValueError("Argument linewidths must have the same length as argument levels")
		# matplotlib may drop levels outside the data range, so the drawn
		# contours can be fewer than requested. In that case, match each drawn
		# level to the nearest requested level to look up its line width.
		# NOTE(review): ContourSet.collections is deprecated in newer
		# matplotlib versions (>= 3.8) -- verify against the supported version.
		if len(contours.collections) != len(levels):
			levels1 = contours.levels
			linewidths1 = [linewidths[np.argsort(np.abs(levels - l))[0]] for l in levels1]
		else:
			levels1 = levels
			linewidths1 = linewidths
		for ci in range(0, len(levels1)):
			if linewidths1[ci] is not None:
				plt.setp(contours.collections[ci], linewidth = linewidths1[ci])
+
def valrange_patch(xmin, xmax, ymin, ymax, vertical=True):
	"""Create a Rectangle patch that shades a validity range.

	If vertical is False, the x and y roles of the coordinates are swapped.
	"""
	style = dict(facecolor = '#c02000', edgecolor = None, alpha = 0.5, zorder = -5)
	if vertical:
		corner, width, height = (xmin, ymin), xmax - xmin, ymax - ymin
	else:
		corner, width, height = (ymin, xmin), ymax - ymin, xmax - xmin
	return Rectangle(corner, width, height, **style)
+
def valrange_edge(xmin, xmax, y, vertical=True):
	"""Draw a dotted red line marking one edge of the validity range.

	If vertical is False, the x and y roles of the coordinates are swapped.
	"""
	if vertical:
		xcoord, ycoord = [xmin, xmax], [y, y]
	else:
		xcoord, ycoord = [y, y], [xmin, xmax]
	plt.plot(xcoord, ycoord, ':', color = 'r')
+
def add_valrange(ee, idosmin, idosmax, vrmin = None, vrmax=None, vertical=True):
	"""Mark the validity range [vrmin, vrmax] on the current axes.

	Energies outside the validity range are shaded with valrange_patch(); each
	given bound is outlined with valrange_edge(). Bounds that are None are
	skipped.
	"""
	ax = plt.gca()
	e_lo, e_hi = min(ee), max(ee)
	if vrmin is not None:
		# Shade the energies below the lower validity bound, if any are shown
		if e_lo < vrmin:
			ax.add_patch(valrange_patch(idosmin, idosmax, e_lo, vrmin, vertical=vertical))
		valrange_edge(idosmin, idosmax, vrmin, vertical=vertical)
	if vrmax is not None:
		# Shade the energies above the upper validity bound, if any are shown
		if e_hi > vrmax:
			ax.add_patch(valrange_patch(idosmin, idosmax, e_hi, vrmax, vertical=vertical))
		valrange_edge(idosmin, idosmax, vrmax, vertical=vertical)
+
def dos_idos(params, densitydata, outputid = "", filename = "", title = None, density_range = None, **plotopts):
	"""Density of states (DOS) and integrated density of states (IDOS) plots; wrapper function

	Arguments:
	params          PhysParams instance.
	densitydata     DensityData instance. Container for IDOS vs energy values.
	outputid        String that is inserted into the filenames.
	filename        NOT USED
	title           Plot title
	density_range   None or 2-tuple. If set, then use the values in order to
	                determine the correct scale for the density axis.
	**plotopts      Additional plot options, which are ignored by this function.

	Returns:
	2-tuple (fig_idos, fig_dos) of matplotlib figures; an entry is None if the
	corresponding data is absent from densitydata.
	"""
	if not isinstance(densitydata, DensityData):
		raise TypeError("Argument densitydata must be a DensityData instance")

	# Common keyword arguments for both sub-plots
	common = dict(outputid = outputid, title = title, density_range = density_range, **plotopts)

	# Each plot is produced only if the corresponding data exists
	fig_idos = integrated_dos(params, densitydata, **common) if densitydata.get_idos() is not None else None
	fig_dos = dos(params, densitydata, **common) if densitydata.get_dos() is not None else None
	return fig_idos, fig_dos
+
def integrated_dos(params, densitydata, outputid = "", filename = "", title = None, density_range = None, **plotopts):
	"""Integrated density of states (IDOS) plot.

	See dos_idos() for a description of the arguments. The figure is saved as
	dos-integratedXXX.pdf, where XXX is outputid.

	Returns:
	A matplotlib figure instance, or None if no IDOS data is available.
	"""
	unit_negexp = get_config_bool('plot_dos_units_negexp')
	vertical = get_config_bool('plot_dos_vertical')
	valrange = get_config_bool('plot_dos_validity_range')
	idos_fill = get_config_bool('plot_idos_fill')
	dos_color = get_config('plot_dos_color')
	dens_qty = get_config('dos_quantity')
	dens_unit = get_config('dos_unit')
	e0_color = rcParams.get('lines.color')

	# Extract data from DensityData container (shortcuts)
	if not isinstance(densitydata, DensityData):
		raise TypeError("Argument densitydata must be a DensityData instance")
	ee = densitydata.ee
	energies = densitydata.get_special_energies()
	vrmin, vrmax = densitydata.get_validity_range()

	# Scale automatically and determine the units (but do not set plot limits).
	# Accept tuples as well as lists for density_range (generalization; the
	# previous isinstance(..., list) test silently ignored tuples).
	if isinstance(density_range, (list, tuple)) and len(density_range) == 2:
		# If only the upper value is given, use a symmetric range
		densrange = [-density_range[1], density_range[1]] if density_range[0] is None else list(density_range)
	else:
		densrange = None
	densitydata.set_scale(dens_qty, dens_unit, scaled_limits = densrange)
	dscale = densitydata.get_scale()
	idos = densitydata.get_idos(scaled = True)
	if idos is None:
		sys.stderr.write("Warning (ploto.integrated_dos): No IDOS data.\n")
		return None
	qstr = densitydata.qstr(style = 'tex', integrated = True, scaled = True)
	ustr = densitydata.unitstr(style = 'tex', integrated = True, scaled = True, negexp = unit_negexp)
	# Density axis limits: data endpoints if no range is given; otherwise the
	# scale limits, falling back to the requested range if there is no scale.
	if densrange is None:
		idosmin, idosmax = idos[0], idos[-1]
	elif dscale is None:
		idosmin, idosmax = densrange[0], densrange[1]
	else:
		idosmin, idosmax = dscale.scaledmin, dscale.scaledmax

	# IDOS figure
	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)

	if vertical:
		# Density on the horizontal axis, energy on the vertical axis
		if idos_fill:
			plt.fill_betweenx(ee, idos, 0, edgecolor=None, facecolor=dos_color, alpha=0.5, zorder=-7)
		plt.plot(idos, ee, '-', color=dos_color, zorder=-6)
		plt.plot([0.0, 0.0], [min(ee), max(ee)], ':', color=e0_color)
		if valrange:
			add_valrange(ee, idosmin, idosmax, vrmin=vrmin, vrmax=vrmax, vertical=vertical)
		plt.axis([idosmin, idosmax, min(ee), max(ee)])
		if get_config_bool('plot_dos_energies'):
			plot_energies(energies, xval = [idosmin, idosmax])
		set_ylabel('$E$', '$\\mathrm{meV}$')
		set_xlabel(qstr, ustr)
		set_ticks()
	else:
		# Energy on the horizontal axis, density on the vertical axis
		if idos_fill:
			plt.fill_between(ee, idos, 0, edgecolor=None, facecolor=dos_color, alpha=0.5, zorder=-7)
		plt.plot(ee, idos, '-', color=dos_color, zorder=-6)
		plt.plot([min(ee), max(ee)], [0.0, 0.0], ':', color=e0_color)
		if valrange:
			add_valrange(ee, idosmin, idosmax, vrmin=vrmin, vrmax=vrmax, vertical=vertical)
		plt.axis([min(ee), max(ee), idosmin, idosmax])
		if get_config_bool('plot_dos_energies'):
			plot_energies(energies, yval = [idosmin, idosmax])
		set_xlabel('$E$', '$\\mathrm{meV}$')
		set_ylabel(qstr, ustr)
		set_ticks()

	if (title is not None) and (title != ""):
		ax.text(0.5, 0.98, title, ha='center', va='top', transform=ax.transAxes)

	plt.savefig("dos-integrated%s.pdf" % outputid)

	return fig
+
def dos(params, densitydata, outputid = "", filename = "", title = None, density_range = None, **plotopts):
	"""Density of states (DOS) plot.

	See dos_idos() for a description of the arguments. The figure is saved as
	dosXXX.pdf, where XXX is outputid.

	Returns:
	A matplotlib figure instance, or None if no DOS data is available.
	"""
	unit_negexp = get_config_bool('plot_dos_units_negexp')
	vertical = get_config_bool('plot_dos_vertical')
	valrange = get_config_bool('plot_dos_validity_range')
	dos_fill = get_config_bool('plot_dos_fill')
	dos_color = get_config('plot_dos_color')
	dens_qty = get_config('dos_quantity')
	dens_unit = get_config('dos_unit')

	# Extract data from DensityData container (shortcuts)
	if not isinstance(densitydata, DensityData):
		raise TypeError("Argument densitydata must be a DensityData instance")
	ee = densitydata.ee
	energies = densitydata.get_special_energies()
	vrmin, vrmax = densitydata.get_validity_range()

	# Scale automatically and determine the units (but do not set plot limits).
	# Accept tuples as well as lists for density_range, consistent with
	# integrated_dos().
	if isinstance(density_range, (list, tuple)) and len(density_range) == 2:
		# If only the upper value is given, use a symmetric range
		densrange = [-density_range[1], density_range[1]] if density_range[0] is None else list(density_range)
	else:
		densrange = None
	densitydata.set_scale(dens_qty, dens_unit, scaled_limits = densrange)
	dscale = densitydata.get_scale()

	# DOS figure
	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)

	# Get (scaled) DOS values; the DOS is defined on the midpoints of the
	# energy grid. Use a local name that does not shadow this function.
	ee1 = (ee[1:] + ee[:-1]) / 2.0
	dos_vals = densitydata.get_dos(derivative = 'diff', scaled = True)
	if dos_vals is None:
		sys.stderr.write("Warning (ploto.dos): No DOS data.\n")
		return None
	qstr = densitydata.qstr(style = 'tex', integrated = False, scaled = True)
	ustr = densitydata.unitstr(style = 'tex', integrated = False, scaled = True, negexp = unit_negexp)
	dosmin, dosmax = 0.0, 1.1 * max(dos_vals)
	# Round dosmax up to a * 10**b where a and b are integer values
	dosmax_exp = np.floor(np.log10(dosmax))
	dosmax_val = np.ceil(dosmax / 10**dosmax_exp)
	dosmax = dosmax_val * 10**dosmax_exp

	if vertical:
		# Density on the horizontal axis, energy on the vertical axis
		if dos_fill:
			plt.fill_betweenx(ee1, dos_vals, 0, edgecolor=None, facecolor=dos_color, alpha=0.5, zorder=-7)
		plt.plot(dos_vals, ee1, '-', color=dos_color, zorder=-6)
		if valrange:
			add_valrange(ee, 0.0, 1.1 * dosmax, vrmin=vrmin, vrmax=vrmax, vertical=vertical)
		plt.axis([dosmin, dosmax, min(ee), max(ee)])
		if get_config_bool('plot_dos_energies'):
			plot_energies(energies, xval = [dosmin, dosmax])
		set_ylabel('$E$', '$\\mathrm{meV}$')
		set_xlabel(qstr, ustr)
		set_ticks()
	else:
		# Energy on the horizontal axis, density on the vertical axis
		if dos_fill:
			plt.fill_between(ee1, dos_vals, 0, edgecolor=None, facecolor=dos_color, alpha=0.5, zorder=-7)
		plt.plot(ee1, dos_vals, '-', color=dos_color, zorder=-6)
		if valrange:
			add_valrange(ee, 0.0, 1.1 * dosmax, vrmin=vrmin, vrmax=vrmax, vertical=vertical)
		plt.axis([min(ee), max(ee), dosmin, dosmax])
		if get_config_bool('plot_dos_energies'):
			plot_energies(energies, yval = [dosmin, dosmax])
		set_xlabel('$E$', '$\\mathrm{meV}$')
		set_ylabel(qstr, ustr)
		set_ticks()

	if (title is not None) and (title != ""):
		ax.text(0.5, 0.98, title, ha='center', va='top', transform=ax.transAxes)

	plt.savefig("dos%s.pdf" % outputid)

	return fig
+
def local_density(params, densitydata, integrated = False, **kwds):
	"""Plot a local (integrated) density of states.

	Thin wrapper around density2d() that unpacks the plot data and metadata
	from a DensityData instance.
	"""
	kwds.update(
		ll = densitydata.ll,
		zunit = densitydata.scale if densitydata.scale is not None else 'nm',
		energies = densitydata.special_energies,
	)
	plotdata = densitydata.xyz_idos() if integrated else densitydata.xyz_dos()
	if any(d is None for d in plotdata):
		label = "IDOS" if integrated else "DOS"
		sys.stderr.write("Warning (ploto.local_density): " + label + " is not defined.\n")
		return None
	return density2d(params, *plotdata, integrated = integrated, **kwds)
+
def densityz_energy(params, zval, eeval, densityz, integrated = False, **kwds):
	"""Wrapper around density2d() for 'densityz' input.

	Arguments:
	params      PhysParams instance; passed through to density2d().
	zval        Array of z coordinates (horizontal axis).
	eeval       Array of energy values (vertical axis).
	densityz    Data array with the density as function of z and energy.
	integrated  If True, the data is an integrated density; passed through to
	            density2d().
	**kwds      Further keyword arguments, passed through to density2d().

	TODO: densityz will become its own class in the future
	"""
	return density2d(
		params, zval, eeval, densityz, integrated = integrated, **kwds)
+
def density2d(
		params, xval, yval, zval, outputid = "", energies = None, filename = "",
		title = None, interpolate = True, xlabel = None, ylabel = None,
		yunit = None, zunit = None, xrange = None, zrange = None,
		colormap = "Blues", legend = False, posneg = False, contours = False,
		contoursdata = None, contoursval = None, ll = False, integrated = False,
		frequency_ticks = False, **plotopts):
	"""Generic function used for local density of states (DOS) plots and the like.
	The horizontal and vertical axis can be any quantity, as can be the data.

	Arguments:
	params          PhysParams instance.
	xval            Array of values on the horizontal axis.
	yval            Array of values on the vertical axis.
	zval            Data array. The size should correspond to the lengths of
	                xval and yval.
	outputid        String that is inserted into the filename. Only meaningful
	                if filename is None.
	energies        A dict instance with special energies. This is used to show
	                dashed lines at the Fermi energy, charge neutrality point,
	                etc. See tools.plot_energies(). If None, do not plot
	                special energies.
	filename        Output filename. If None, use dos-localxxx.pdf, where xxx is
	                the outputid.
	title           Plot title
	interpolate     Whether to apply interpolation to the data. If xval is a
	                uniform array, then use the interpolation method as
	                specified (interpolate being a string) or the 'bilinear'
	                method if interpolate is True or no interpolation if
	                interpolate is False or None. If xval is a non-uniform
	                array, use 'flat' if interpolate is False or None, otherwise
	                use 'gouraud'. See matplotlib documentation for functions
	                pcolormesh() and imshow() for more information on the
	                interpolation methods.
	xlabel          Set label on the x axis. Use 'k_x' if it is not set.
	ylabel          Set label on the y axis. Use 'E' (energy) if it is not set.
	yunit           If a string, use this density unit for the y axis. If True,
	                use the density unit from the DensityData instance. If
	                False, do not scale.
	zunit           If a string, use this density unit for the data ('z
	                values'). If True, use the density unit from the DensityData
	                instance. If False, do not scale.
	xrange          None or 2-tuple. Extent of the horizontal axis. If None,
	                determine automatically.
	zrange          None or 2-tuple. Minimum and maximum value of the colour
	                scale. If None, determine automatically.
	colormap        A matplotlib or kdotpy colormap id. Used for the colour
	                scale.
	legend          If True, draw a legend. If it is a string, draw a legend and
	                use this string as its label. If False, do not draw a
	                legend.
	posneg          Indicates whether the data is a strictly positive quantity
	                (posneg = False) or can also take negative values (posneg =
	                True).
	contours        If True, draw contours at automatically determined values.
	contoursdata    An array that contains the coordinates of one or more
	                contours (containing the y values as function of the x
	                values in xval). The array may be one-dimensional (single
	                contour) or two-dimensional (multiple contours).
	contoursval     NOT USED
	ll              Indicate whether one is plotting Landau levels (if so, set
	                it to True). This affects the density scaling.
	integrated      Indicate whether the quantity is an integrated density (if
	                so, set it to True). This affects the displayed units that
	                correspond to density.
	frequency_ticks  If True, add frequency ticks at the inner edge of the
	                 left-hand axis, like in the transitions plot.
	**plotopts      Additional plot options, which are ignored by this function.

	Note:
	The options contours and contoursdata can be used at the same time.

	Returns:
	A matplotlib figure instance, or None on invalid input.
	"""
	if zval is None:
		sys.stderr.write("ERROR (ploto.local_dos): No data (argument zval).\n")
		return
	# Validate xval before creating the figure, so that invalid input does not
	# leave behind an empty matplotlib figure.
	# Bug fix: this condition used to be 'not isinstance(...) and len(xval) > 0',
	# which raised the error only for non-empty inputs of the wrong type and
	# silently let empty lists/arrays through (crashing later at min/max).
	if not isinstance(xval, (VectorGrid, list, np.ndarray)) or len(xval) == 0:
		sys.stderr.write("ERROR (ploto.local_dos): Argument xval must be a non-empty list or array.\n")
		return

	fig = plt.figure(get_fignum(), figsize = get_plot_size('s', legend = legend))
	plt.subplots_adjust(**get_plot_size('subplot', legend = legend))
	ax = fig.add_subplot(1, 1, 1)
	colormap = get_colormap(colormap)
	unit_negexp = get_config_bool('plot_dos_units_negexp')
	dens_qty = get_config('dos_quantity')
	dens_unit = get_config('dos_unit')

	# Reduce xval to a plain numerical array
	if isinstance(xval, VectorGrid):
		xval1 = xval.get_values(None)
	elif isinstance(xval, (list, np.ndarray)) and len(xval) > 0 and isinstance(xval[0], Vector):
		xval1 = np.array([k.len() for k in xval])
	else:  # list/array of numbers
		xval1 = xval
	xmin0 = min(xval1)
	xmax0 = max(xval1)
	if isinstance(xrange, (list, tuple)) and len(xrange) == 2:
		xmin = min(xrange)
		xmax = max(xrange)
	elif xrange is None:
		extend_xaxis = get_config_num('fig_extend_xaxis', minval = 0)
		xmin = xmin0 - extend_xaxis * (xmax0 - xmin0) if extend_xaxis > 0 else xmin0
		xmax = xmax0 + extend_xaxis * (xmax0 - xmin0) if extend_xaxis > 0 else xmax0
	else:
		raise TypeError("Argument xrange must be a list/tuple of length 2 or None")

	# High percentile of the (absolute) data, used for automatic colour scaling.
	# Note: taken from the unscaled data; the selected maximum is scaled below.
	z90 = np.percentile(np.abs(zval), 90.0) if posneg else np.percentile(zval, 95.0)
	if isinstance(zunit, DensityScale):
		zscale = zunit
		zval = zscale.scaledvalues(zval)
	elif zunit is True:
		zscale = DensityScale(zval, dens_qty, dens_unit, ll = ll, kdim = 2 if ll else params.kdim)
		zval = zscale.scaledvalues()
	else:
		zscale = None

	if isinstance(yunit, DensityScale):
		yscale = yunit
		yval = yscale.scaledvalues(yval)
	elif yunit is True:
		yscale = DensityScale(yval, dens_qty, dens_unit, ll = ll, kdim = 2 if ll else params.kdim)
		yval = yscale.scaledvalues()
	else:
		yscale = None

	if zrange is not None:
		vmin = -max(abs(zrange[0]), abs(zrange[1])) if posneg else zrange[0]
		vmax = max(abs(zrange[0]), abs(zrange[1])) if posneg else zrange[1]
	else:  # automatic scaling: pick the smallest preset level above 1.2 * z90
		absvmax = 1.0
		for vmax in [0.0001, 0.0002, 0.0004, 0.0006, 0.001, 0.002, 0.004, 0.006, 0.01, 0.02, 0.04, 0.06, 0.1, 0.2, 0.4, 0.6]:
			if 1.2 * z90 < vmax:
				absvmax = vmax
				break
		vmax = absvmax if zscale is None else zscale.scaledvalues(absvmax)
		vmin = -vmax if posneg else 0.0

	## Draw colour (background)
	if not is_range_uniform(xval1):
		# Non-uniform x grid: pcolormesh. The axis arrays may specify either
		# the cell centres or the cell edges; adjust lengths accordingly.
		if interpolate:
			shading_method = 'gouraud'
			pxval = shorten_by_one(xval1) if len(xval1) == zval.shape[0] + 1 else xval1
			pyval = shorten_by_one(yval) if len(yval) == zval.shape[1] + 1 else yval
		else:
			shading_method = 'flat'
			pxval = xval1 if len(xval1) == zval.shape[0] + 1 else extend_by_one(xval1)
			pyval = yval if len(yval) == zval.shape[1] + 1 else extend_by_one(yval)
		rasterized = get_config_bool('plot_rasterize_pcolormesh')
		plt.pcolormesh(
			pxval, pyval, zval.transpose(), cmap = colormap, vmin = vmin,
			vmax = vmax, shading = shading_method, rasterized = rasterized)
	else:
		# Uniform x grid: imshow with an explicit extent
		interpolation_method = None if interpolate is False else 'bilinear' if interpolate is True else interpolate
		dx = 0.5 * (xval1[1] - xval1[0])
		left = xmin0 if len(xval1) == zval.shape[0] + 1 else xmin0 - dx
		right = xmax0 if len(xval1) == zval.shape[0] + 1 else xmax0 + dx
		dy = 0.5 * (yval[1] - yval[0])
		bottom = min(yval) if len(yval) == zval.shape[1] + 1 else min(yval) - dy
		top = max(yval) if len(yval) == zval.shape[1] + 1 else max(yval) + dy
		plt.imshow(np.clip(zval.transpose(), vmin, vmax), cmap = colormap, origin = 'lower', extent = (left, right, bottom, top), aspect = 'auto', vmin = vmin, vmax = vmax, interpolation = interpolation_method)

	## Draw contours (preset)
	if contours:
		# Level spacing depends on the magnitude of the colour-scale maximum;
		# thicker lines every 5th and 10th level (or near zero for posneg data)
		if vmax <= 0.03:
			levels = np.arange(-0.015, 0.0151, 0.001) if posneg else np.arange(0.002, vmax + 0.0001, 0.002)
		elif vmax <= 0.3:
			levels = np.arange(-0.15, 0.151, 0.01) if posneg else np.arange(0.02, vmax + 0.0001, 0.02)
		elif vmax <= 1.5:
			levels = np.arange(-1.0, 1.01, 0.1) if posneg else np.arange(0.2, vmax + 0.0001, 0.2)
		else:
			levels = np.arange(-np.ceil(vmax) - 0.5, np.ceil(vmax) + 0.51, 1.0) if posneg else np.arange(0.5, np.ceil(vmax) + 0.51, 1.0)
		c0 = np.argmin(np.abs(levels))
		if levels[0] > 0:
			linewidths = [2.0 if (ci + 1) % 10 == 0 else 1.25 if (ci + 1) % 5 == 0 else 0.75 for ci in range(0, len(levels))]
		elif np.abs(levels[c0]) > 0.1:
			linewidths = [2.0 if np.abs(lv) < 0.9 else 0.75 for lv in levels]
		else:
			linewidths = [2.0 if (ci - c0) % 10 == 0 else 1.25 if (ci - c0) % 5 == 0 else 0.75 for ci in range(0, len(levels))]
		add_contours(xval1, yval, zval, levels, xminmax = [xmin0, xmax0], linewidths = linewidths)
	## Draw contours (from input)
	if isinstance(contoursdata, np.ndarray) and len(contoursdata.shape) == 1:
		plt.plot(xval1, contoursdata, 'r-')
	elif isinstance(contoursdata, np.ndarray) and len(contoursdata.shape) == 2:
		for cdata in contoursdata:
			plt.plot(xval1, cdata, 'r-')
	if get_config_bool('plot_dos_energies'):
		plot_energies(energies, xval = [xmin, xmax])
	plt.axis([xmin, xmax, min(yval), max(yval)])
	if isinstance(xlabel, str) and len(xlabel) > 0:
		plt.xlabel(xlabel)
	else:
		set_xlabel('$k_x$', '$\\mathrm{nm}^{-1}$')
	if isinstance(ylabel, str) and len(ylabel) > 0:
		plt.ylabel(ylabel)
	elif isinstance(yscale, DensityScale):  # i.e., it is an IDOS
		yqstr = yscale.qstr(style = 'tex', integrated = True)
		yunitstr = yscale.unitstr(style = 'tex', integrated = True, negexp = unit_negexp)
		set_ylabel(str(yqstr), yunitstr)
	else:
		set_ylabel('$E$', '$\\mathrm{meV}$')
	set_ticks()
	if frequency_ticks:
		add_frequency_ticks()

	if legend:
		legend_filename = get_legend_file(filename)
		if zscale is not None:
			zqstr = zscale.qstr(style = 'tex', integrated = integrated)
			zunitstr = zscale.unitstr(style = 'tex', integrated = integrated, negexp = unit_negexp)
		legend_label = legend if isinstance(legend, str) else None if zscale is None else zqstr if zunitstr is None else "%s\n%s" % (zqstr, format_axis_unit(zunitstr))
		add_colorbar(vmin, vmax, cmap = colormap, label = legend_label, label_y1 = -0.05, label_y2 = -0.05, filename = legend_filename)

	if (title is not None) and (title != ""):
		ax.text(0.5, 0.98, title, ha='center', va='top', transform=ax.transAxes)

	if filename is None or len(filename) == 0:
		filename = "dos-local%s.pdf" % outputid
	plt.savefig(filename)
	# plt.close()
	return fig
+
def dos_ll(
		params, bval, ee, ndos, outputid = "", energies = None, filename = "",
		title = None, interpolate = True, xlabel = None, xrange = None,
		colormap = "Blues", contours = False, contoursdata = False,
		contoursval = None, legend = False, **plotopts):
	"""Plot density of states for LL data

	Arguments:
	params        PhysParams instance.
	bval          Array of magnetic field (b) values (horizontal axis)
	ee            Array of energy values (vertical axis)
	ndos          Numeric density of states (number of filled LLs below, counted
	              from the charge neutrality point). This is a two-dimensional
	              array whose dimensions correspond to the lengths of bval and
	              ee.
	outputid      String inserted into the filename. Only meaningful when
	              filename is None or empty.
	energies      A dict instance with special energies. This is used to show
	              dashed lines at the Fermi energy, charge neutrality point,
	              etc. See tools.plot_energies(). If None, do not plot special
	              energies.
	filename      Output filename. If not set, use dos-numericxxx.pdf, where
	              xxx is the outputid.
	title         Plot title
	interpolate   Whether to apply interpolation to the data. If xval is a
	              uniform array, then use the interpolation method as
	              specified (interpolate being a string) or the 'bilinear'
	              method if interpolate is True or no interpolation if
	              interpolate is False or None. If xval is a non-uniform array,
	              use 'flat' if interpolate is False or None, otherwise use
	              'gouraud'. See matplotlib documentation for functions
	              pcolormesh() and imshow() for more information on the
	              interpolation methods.
	xlabel        Label on the x axis. If None, use 'B' (magnetic field).
	xrange        None or 2-tuple. Extent of the horizontal axis. If None,
	              determine automatically.
	colormap      A matplotlib or kdotpy colormap.
	contours      If True, draw contours at automatically determined values.
	contoursdata  An array that contains the coordinates of one or more
	              contours (containing the y values as function of the x values
	              in xval). The array may be one-dimensional (single contour) or
	              two-dimensional (multiple contours).
	contoursval   NOT USED
	legend        If True, draw a legend. If it is a string, draw a legend and
	              use this string as its label. If False, do not draw a legend.
	**plotopts    Additional plot options, which are ignored by this function.

	Returns:
	A matplotlib figure instance, or None on invalid input.
	"""
	fig = plt.figure(get_fignum(), figsize = get_plot_size('s', legend = legend))
	plt.subplots_adjust(**get_plot_size('subplot', legend = legend))
	ax = fig.add_subplot(1, 1, 1)

	# Reduce bval to a plain numerical array
	if isinstance(bval, VectorGrid) and len(bval) > 0:
		bval = bval.get_values('b')
	elif isinstance(bval, (list, np.ndarray)) and len(bval) > 0:
		bval = np.asarray(bval)
	else:
		sys.stderr.write("ERROR (ploto.dos_ll): Magnetic-field values bval must be a non-empty list or array.\n")
		return
	bmin0 = min(bval)
	bmax0 = max(bval)
	if isinstance(xrange, (list, tuple)) and len(xrange) == 2:
		bmin = min(xrange)
		bmax = max(xrange)
	elif xrange is None:
		extend_xaxis = get_config_num('fig_extend_xaxis', minval = 0)
		bmin = bmin0 - extend_xaxis * (bmax0 - bmin0) if extend_xaxis > 0 else bmin0
		bmax = bmax0 + extend_xaxis * (bmax0 - bmin0) if extend_xaxis > 0 else bmax0
	else:
		raise TypeError("Argument xrange must be a list/tuple of length 2 or None")
	# Colour-scale maximum: largest |ndos| excluding the first field value,
	# capped at 10
	nmax = min(np.ceil(np.max(np.abs(ndos)[1:, :])), 10)
	colormap = get_colormap(colormap)
	vmin, vmax = indexed_color_auto_range(colormap)
	if vmin is None and vmax is None:
		vmin, vmax = -nmax, nmax

	zdata = np.ma.masked_where(np.isnan(ndos), ndos).transpose()  # prevent warnings that occur if NaN values are present
	## Draw colour (background)
	if not is_range_uniform(bval):
		# Non-uniform b grid: pcolormesh. The axis arrays may specify either
		# the cell centres or the cell edges; adjust lengths accordingly.
		if interpolate:
			shading_method = 'gouraud'
			pxval = shorten_by_one(bval) if len(bval) == ndos.shape[0] + 1 else bval
			pyval = shorten_by_one(ee) if len(ee) == ndos.shape[1] + 1 else ee
		else:
			shading_method = 'flat'
			pxval = bval if len(bval) == ndos.shape[0] + 1 else extend_by_one(bval)
			pyval = ee if len(ee) == ndos.shape[1] + 1 else extend_by_one(ee)
		rasterized = get_config_bool('plot_rasterize_pcolormesh')
		plt.pcolormesh(
			pxval, pyval, zdata, cmap = colormap, vmin = vmin, vmax = vmax,
			shading = shading_method, rasterized = rasterized)
	else:
		# Uniform b grid: imshow with an explicit extent
		interpolation_method = None if interpolate is False else 'bilinear' if interpolate is True else interpolate
		dx = 0.5 * (bval[1] - bval[0])
		left = bmin0 if len(bval) == ndos.shape[0] + 1 else bmin0 - dx
		# Bug fix: the following line used to test len(ee) instead of len(bval),
		# inconsistent with the 'left' edge above (copy-paste error).
		right = bmax0 if len(bval) == ndos.shape[0] + 1 else bmax0 + dx
		dy = 0.5 * (ee[1] - ee[0])
		bottom = min(ee) if len(ee) == ndos.shape[1] + 1 else min(ee) - dy
		top = max(ee) if len(ee) == ndos.shape[1] + 1 else max(ee) + dy
		plt.imshow(np.clip(zdata, vmin, vmax), cmap = colormap, origin = 'lower', extent = (left, right, bottom, top), aspect = 'auto', vmin = vmin, vmax = vmax, interpolation = interpolation_method)

	if contours:
		# Contours at half-integer filling; thick line near zero
		levels = np.arange(-9.5, 9.6, 1.0)
		linewidths = [2.0 if np.abs(lv) < 0.9 else 0.75 for lv in levels]
		add_contours(bval, ee, ndos, levels, linewidths = linewidths)
	if isinstance(contoursdata, np.ndarray) and len(contoursdata.shape) == 1:
		plt.plot(bval, contoursdata, 'r-')
	elif isinstance(contoursdata, np.ndarray) and len(contoursdata.shape) == 2:
		for cdata in contoursdata:
			plt.plot(bval, cdata, 'r-')
	plt.axis([bmin, bmax, min(ee), max(ee)])
	if isinstance(xlabel, str) and len(xlabel) > 0:
		plt.xlabel(xlabel)
	else:
		set_xlabel('$B$', '$\\mathrm{T}$')
	set_ylabel('$E$', '$\\mathrm{meV}$')
	set_ticks()

	if legend:
		legend_filename = get_legend_file(filename)
		legend_label = legend if isinstance(legend, str) else "NDOS\n(# levels)"
		add_colorbar(vmin, vmax, cmap = colormap, label = legend_label, label_y1 = -0.05, label_y2 = -0.05, filename = legend_filename)
	if filename is None or len(filename) == 0:
		filename = "dos-numeric%s.pdf" % outputid

	if (title is not None) and (title != ""):
		ax.text(0.5, 0.98, title, ha='center', va='top', transform=ax.transAxes)

	plt.savefig(filename)
	# plt.close()
	return fig
+
def subdiv_minmax(xval, yval):
	"""Subdivide an array of y values to match a coarser array of x values, keeping in addition the extrema of y.
	Sometimes, one ends up with an array of y values that has been evaluated at
	a subdivision of the array of x values, but the subdivided array of x values
	is not readily available. As arrays of different lengths cannot be plotted,
	one needs to 'align' the values. This function takes the y values at the
	(coarser) x values, and outputs these. In order to not lose information
	about the extrema, the extrema (x and y values) are inserted where
	necessary. The function returns two (equally long) arrays, slightly longer
	than the input xval.

	Note:
	The function works only if the array yval is a subdivision of the array
	xval, i.e., len(yval) - 1 must be an integer multiple of len(xval) - 1.
	"""
	nx, ny = len(xval), len(yval)
	# yval must be a proper subdivision of xval; otherwise pass input through
	if ny <= nx or (ny - 1) % (nx - 1) != 0:
		return xval, yval
	subdiv = (ny - 1) // (nx - 1)
	if isinstance(xval, VectorGrid):
		xval = xval.get_values(xval.prefix)  # TODO: May not always be the correct choice
	else:
		xval = np.asarray(xval)
	yval = np.asarray(yval)
	# Construct the subdivided x array by linear interpolation between
	# neighbouring coarse x values (length subdiv * (nx - 1) + 1).
	xarr = np.array(xval)
	segments = np.array([(1. - j / subdiv) * xarr[:-1] + (j / subdiv) * xarr[1:] for j in range(0, subdiv)])
	xval_ip = np.concatenate((np.hstack(segments.transpose()), xarr[-1:]), axis=0)

	threshold_value = 1.05  # setting this value to != 1 is appropriate only if yval >= 0 everywhere
	extra_x = []
	extra_y = []

	x_at_min = []
	x_at_max = []
	# Scan the interior coarse points for local extrema (of the coarse samples);
	# where one is found, take the extremal fine-grid point in the surrounding
	# window and remember it as an extra data point.
	for i in range(1, len(xval) - 1):
		y_left = yval[subdiv * (i - 1)]
		y_mid = yval[subdiv * i]
		y_right = yval[subdiv * (i + 1)]
		if y_left is None or y_mid is None or y_right is None:
			continue
		lo = subdiv * (i - 1) + 1  # window of fine indices around coarse point i
		hi = subdiv * (i + 1)
		if threshold_value * y_left < y_mid and threshold_value * y_right < y_mid:  # maximum
			j = np.argmax(yval[lo:hi])
			x_at_max.append(xval_ip[lo + j])
			if j != subdiv - 1:  # skip if the extremum sits on the coarse point itself
				extra_x.append(xval_ip[lo + j])
				extra_y.append(yval[lo + j])
		elif y_left > threshold_value * y_mid and y_right > threshold_value * y_mid:  # minimum
			j = np.argmin(yval[lo:hi])
			x_at_min.append(xval_ip[lo + j])
			if j != subdiv - 1:  # skip if the extremum sits on the coarse point itself
				extra_x.append(xval_ip[lo + j])
				extra_y.append(yval[lo + j])

	# Show positions of maxima and minima
	if 'verbose' in sys.argv:
		print("Maxima, 1/B =", np.sort(1. / np.array(x_at_max)))
		print("Delta(1/B)  =", np.diff(np.sort(1. / np.array(x_at_max))))
		print("Minima, 1/B =", np.sort(1. / np.array(x_at_min)))
		print("Delta(1/B)  =", np.diff(np.sort(1. / np.array(x_at_min))))

	# Merge the coarse samples with the inserted extremum points; sort by x
	all_x = np.concatenate((xval, np.array(extra_x)))
	all_y = np.concatenate((yval[::subdiv], np.array(extra_y)))
	order = np.argsort(all_x)
	return all_x[order], all_y[order]
+
def at_constant_dens_ll(
	xval, densval, zval, filename = "", legend = False, high_resolution = True,
	xlabel = None, ylabel = None, xrange = None, yrange = None, omit_empty = True,
	is_ldos = True, reciprocal = False, extra_function = None, **plotopts):
	"""Plot quantity at constant density.
	This function provides a single plot (line plot) of a quantity as function
	of an 'x value' (e.g., momentum or magnetic field) for each of the specified
	density values.

	Arguments:
	xval             Array of values on horizontal axis
	densval          Array of density values
	zval             Data array (two-dimensional): Quantity as function of x and
	                 density.
	filename         Output file name
	legend           NOT USED
	high_resolution  If True, use subdivision of x values between the specified,
	                 i.e., insert extra values between existing x values.
	xlabel, ylabel   Labels on the horizontal and vertical axes.
	xrange           Extent of the horizontal axis. If None, determine
	                 automatically.
	yrange           Extent of the vertical axis. If None, use default.
	omit_empty       If True, do not include plots at lower and higher density
	                 values for which there is no data (NaN values).
	is_ldos          If True, apply density scaling to the quantity (zval).
	reciprocal       If True, use 1/x as plot variable on the horizontal axis.
	                 If False, use x.
	extra_function   A callable with two arguments. The function is evaluated
	                 as f(x, n) where x and n are substituted from xval and
	                 densval, respectively. An example would be
	                 f(b, n) = b / (n * e) for the classical Hall resistance.
	**plotopts       Additional plot options, which are ignored by this function.

	No return value.
	"""
	debug = False  # set to True for debug output
	unit_negexp = get_config_bool('plot_dos_units_negexp')
	curve_color = get_config('plot_constdens_color')
	sdh_color = get_config('plot_sdh_markers_color')

	zval = np.array(zval, dtype = float)
	if np.all(np.isnan(zval)):
		sys.stderr.write("ERROR (ploto.at_constant_dens_ll): Data does not contain numerical values.\n")
		return
	# With a filename, collect all plots into a single multipage PDF
	pdfpages = PdfPages(filename) if filename is not None and filename != "" else None

	dens_qty = get_config('dos_quantity')
	dens_unit = get_config('dos_unit')
	densval_in = 1.0 * densval  # cache input value (not affected by DensityScale)
	dscale = DensityScale(np.array(densval), dens_qty, dens_unit, kdim = 2, ll = True)
	densval = dscale.scaledvalues()

	if is_ldos:
		zscale = DensityScale(zval, dens_qty, dens_unit, kdim = 2, ll = True)
		zval = zscale.scaledvalues()
	else:
		zscale = None

	# Reduce xval to a plain numerical array
	if isinstance(xval, VectorGrid):
		xval = xval.get_values(None)  # Not always the right choice
	elif isinstance(xval, (list, np.ndarray)) and len(xval) > 0 and isinstance(xval[0], Vector):
		xval = np.array([x.component(None) for x in xval])
	if reciprocal:
		xmin = 0.0
		xmax = np.max(1. / xval[xval != 0])
		if np.count_nonzero(np.abs(densval) > 1e-5):
			# Find minimum x for which there is data, but do not consider density = 0
			isnumx = np.any(~np.isnan(zval[np.abs(densval) > 1e-5, :]), axis = 0)
			if zval.shape[1] > xval.shape[0] and (zval.shape[1] - 1) % (xval.shape[0] - 1) == 0:
				subdiv = (zval.shape[1] - 1) // (xval.shape[0] - 1)
				sel = isnumx[::subdiv] & (xval != 0.0)
			else:
				sel = isnumx & (xval != 0.0)
			xmin1 = 0.0 if np.count_nonzero(sel) == 0 else np.min(xval[sel])
			if xmin1 > 0.0:
				xmax = 1. / xmin1
	else:
		if isinstance(xrange, (list, tuple)) and len(xrange) == 2:
			xmin = min(xrange)
			xmax = max(xrange)
		elif xrange is None:
			xmin0, xmax0 = min(xval), max(xval)
			extend_xaxis = get_config_num('fig_extend_xaxis', minval = 0)
			xmin = xmin0 - extend_xaxis * (xmax0 - xmin0) if extend_xaxis > 0 else xmin0
			xmax = xmax0 + extend_xaxis * (xmax0 - xmin0) if extend_xaxis > 0 else xmax0
		else:
			raise TypeError("Argument xrange must be a list/tuple of length 2 or None")

	# Do not display the outer values for which there is no data
	if omit_empty:
		sel = ~np.all(np.isnan(zval), axis=1)
		selmin = np.min(densval[sel])
		selmax = np.max(densval[sel])
		sel = (densval >= selmin) & (densval <= selmax)
	else:
		sel = np.ones_like(densval, dtype = bool)

	xmax0 = xmax  # cache value
	sdh_markers = get_config_bool('plot_sdh_markers')
	sdh_scale_amount = get_config_int('plot_sdh_scale_amount', minval = 0)
	for dens, dens0, z in zip(densval[sel], densval_in[sel], zval[sel, :]):
		z = np.array(z, dtype = float)
		with warnings.catch_warnings():  # Suppress warning for all-NaN arrays
			warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
			zmax = np.nanmax(np.abs(z))
		if zmax is None or np.isnan(zmax):  # no data
			continue
		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)

		# Shubnikov-de Haas period in 1/B for this density
		sdh_period = eoverhbar / 2 / np.pi / abs(dens0)
		if len(z) > len(xval) and (len(z) - 1) % (len(xval) - 1) == 0:
			subdiv = (len(z) - 1) // (len(xval) - 1)
			if high_resolution:
				if debug:
					print("For n = %.4f %s:" % (dens, dscale.unitstr(style = 'raw', integrated=True)))
					print("Theoretical value of Delta(1/B) = %.6f / T" % sdh_period)
				xval1, z1 = subdiv_minmax(xval, z)  # Smart subdivision, taking care of minima and maxima
				pxval = 1. / xval1[xval1 != 0] if reciprocal else xval1
				pyval = z1[xval1 != 0] if reciprocal else z1
			else:
				pxval = 1. / xval[xval != 0] if reciprocal else xval
				pyval = z[::subdiv][xval != 0] if reciprocal else z[::subdiv]
		else:
			pxval = 1. / xval[xval != 0] if reciprocal else xval
			pyval = z[xval != 0] if reciprocal else z
		plt.plot(pxval, pyval, '-', color=curve_color)
		xmax = xmax0
		# SdH markers: ticks at multiples of the SdH period (reciprocal axis)
		# or at the corresponding B values (non-reciprocal axis)
		if abs(dens0) > 1e-5 and (sdh_markers or sdh_scale_amount > 0):
			if reciprocal:
				jmax = int(np.floor(xmax0 / sdh_period))
				if sdh_scale_amount > 0:
					if jmax > sdh_scale_amount:
						jmax = sdh_scale_amount
						xmax = jmax * sdh_period
				if sdh_markers:
					for jsdh in range(0, jmax + 1):
						marker = 10 if jsdh % 10 == 0 else 2  # 10: caret up, 2: tick up
						plt.plot(sdh_period * jsdh, 0, color = sdh_color, marker = marker)
			else:  # not reciprocal
				jmax = 20
				for jsdh in range(1, jmax + 1):
					marker = 10 if jsdh % 10 == 0 else 2  # 10: caret up, 2: tick up
					plt.plot(1 / jsdh / sdh_period, 0, color = sdh_color, marker = marker)

		if extra_function is not None:
			pyval_extra = extra_function(pxval, 0.0 if abs(dens0) < 1e-9 else dens0)
			linecolor = rcParams['lines.color']
			plt.plot(pxval, pyval_extra, '--', color=linecolor, zorder = 1.9)
		if isinstance(yrange, list) and len(yrange) == 2:
			plt.axis([xmin, xmax, min(yrange), max(yrange)])
		elif zmax is None or np.isnan(zmax):
			plt.axis([xmin, xmax, 0.0, 1.0])
		else:
			# Automatic y scale: round the maximum up to 1, 3, or 10 times a
			# power of ten and choose matching major/minor tick spacings
			yexp = max(-4, np.floor(np.log10(zmax)))
			ymax = zmax * 10**-yexp
			if ymax <= 1.0:
				ymax, tmaj, tmin = 1.0, 0.2, 0.1
			elif ymax <= 3.0:
				ymax, tmaj, tmin = 3.0, 1.0, 0.5
			else:
				ymax, tmaj, tmin = 10.0, 2.0, 1.0
			ax.set_yticks(np.arange(0.0, ymax * 1.01, tmaj) * 10**yexp)
			ax.set_yticks(np.arange(0.0, ymax * 1.01, tmin) * 10**yexp, minor = True)
			plt.axis([xmin, xmax, 0.0, ymax * 10**yexp])

		if xlabel is None:
			set_xlabel("$1/B$" if reciprocal else "$B$", "$\\mathrm{T}^{-1}$" if reciprocal else "$\\mathrm{T}$")
		else:
			plt.xlabel(xlabel)
		if ylabel is not None:
			plt.ylabel(ylabel)
		elif zscale is not None:
			set_ylabel(zscale.qstr(style = 'tex', integrated = False), zscale.unitstr(style = 'tex', integrated = False, negexp = unit_negexp))
		else:
			plt.ylabel("??")
		set_ticks()

		# Density value
		dens_str = "%.4f" % dens
		dens_str = dens_str.rstrip("0")
		if dens_str.endswith("."):
			dens_str += "0"
		if not dens_str.startswith("-"):
			dens_str = "+" + dens_str

		# Density unit
		dens_unit = dscale.unitstr(style = 'tex', integrated=True, negexp = unit_negexp).strip("$ ")
		if dens_unit.startswith("10"):
			dens_unit = r"\times " + dens_unit
		elif dens_unit.startswith("1") and len(dens_unit) > 1:
			dens_unit = dens_unit[1:].strip()

		# Density quantity
		dens_q = dscale.qstr(style = 'tex')
		if '$' in dens_q:
			dens_q = dens_q.strip('$')
		else:
			dens_q = r"\mathrm{%s}" % dens_q

		# Combine into density label, insert at top left of the figure
		dens_space = "" if dens_unit.startswith("/") else r"\,"
		if abs(dens) < 1e-10:
			ax.text(0.03, 0.98, "$%s = 0%s%s$" % (dens_q, dens_space, dens_unit), ha='left', va='top', transform=ax.transAxes)
		else:
			dens_eh = "e" if dens > 0 else "h"
			ax.text(0.03, 0.98, "$%s = %s%s%s$ (%s)" % (dens_q, dens_str, dens_space, dens_unit, dens_eh), ha='left', va='top', transform=ax.transAxes)

		if pdfpages is None:
			# Bug fix: the replace() result used to be discarded (str.replace
			# returns a new string), so the substitution never took effect.
			# NOTE(review): the '+'->'h_'/'-'->'e_' mapping looks inverted
			# relative to the dens_eh label above, and "%.2f" never yields a
			# '+' sign — confirm the intended mapping with the maintainers.
			file_str = ("%.2f" % dens).replace('+', 'h_').replace('-', 'e_')
			plt.savefig("dos-constdens-%s.pdf" % file_str)
		else:
			pdfpages.savefig(fig)
		plt.close()

	if pdfpages is not None:
		pdfpages.close()

	return
+
def add_curves(xval, curvesdata, curvesval = None, fig = None, ax = None, filename = "", linewidth = None):
	"""Add curves to figure. Subdivide the x coordinates if necessary.

	Arguments:
	xval         Array of x values. This array will be subdivided if necessary.
	curvesdata   Array of 'y values'. This array may be one-dimensional (single
	             curve) or two-dimensional (multiple curves).
	curvesval    NOT USED
	fig          None, integer, string, or matplotlib figure instance. If not
	             None, this refers to the figure in which the curves will be
	             drawn. If None, use the current figure.
	ax           A matplotlib axis instance in which the curves will be drawn.
	             If None, use the current axis.
	filename     Output filename. If None or empty, do not save.
	linewidth    Can be None (use default line width), a number (use one line
	             width for all curves) or a list or array (use different line
	             widths for the curves).

	Returns:
	A matplotlib figure instance
	"""
	# Select figure and axis
	if fig is None:
		fig = plt.gcf()
	elif isinstance(fig, (int, str)):
		plt.figure(fig)
	else:
		plt.figure(fig.number)
	if ax is None:
		ax = plt.gca()
	else:
		fig.sca(ax)

	# Reduce xval to a plain float array
	if isinstance(xval, VectorGrid):
		xval = xval.get_values(xval.prefix)  # TODO: May not always be the correct choice
	elif isinstance(xval, (list, np.ndarray)) and len(xval) > 0 and isinstance(xval[0], Vector):
		xval = [v.component(None) for v in xval]
	xval = np.array(xval, dtype = float)  # ensure that input is a float array

	def subdivided_xval(ncurve):
		"""Return xval, linearly subdivided if the curve has more points than xval."""
		if ncurve > len(xval) and (ncurve - 1) % (len(xval) - 1) == 0:
			subdiv = (ncurve - 1) // (len(xval) - 1)
			xv = np.array([(1. - j / subdiv) * xval[:-1] + (j / subdiv) * xval[1:] for j in range(0, subdiv)])
			return np.concatenate((np.hstack(xv.transpose()), xval[-1:]), axis=0)
		return xval

	if isinstance(curvesdata, list):
		curvesdata = np.array(curvesdata)

	if isinstance(curvesdata, np.ndarray) and curvesdata.ndim in (1, 2):
		# Treat a single curve as a one-row array; the loop then handles both
		# cases identically (linewidth[0] matches the former 1-dim behaviour).
		for cj, cdata in enumerate(np.atleast_2d(curvesdata)):
			xval1 = subdivided_xval(len(cdata))
			if linewidth is None:
				plt.plot(xval1, cdata, 'k-')
			elif isinstance(linewidth, (float, np.floating, int, np.integer)):
				plt.plot(xval1, cdata, 'k-', linewidth = linewidth)
			elif isinstance(linewidth, (list, np.ndarray)):
				plt.plot(xval1, cdata, 'k-', linewidth = linewidth[cj])

	if filename is not None and len(filename) > 0:
		plt.savefig(filename)
	return fig
+
def densityz(params, densz, filename = "", title = None, title_val = None, legend = False):
	"""Plot density as function of (the spatial coordinate) z.
	Plot (1) total density, (2) electron, hole, and total density or (3)
	electron, hole, total, and background density.

	Arguments:
	params     PhysParams instance. Used to extract the array of z values.
	densz      dict instance. We extract the values for the keys 'total', 'e',
	           'h', and 'bg'. Each value must be an array of dimension 1 or 2,
	           or None. If one of the arrays has dimension 2, iterate over the
	           first axis and write a multipage PDF as output.
	filename   Output filename
	title      Plot title
	title_val  None, number, tuple, list or array. If a number, print this value
	           in the plot title using % formatting. A tuple can be used for
	           multiple values. If a list or array, take the subsequent values
	           for the subsequent plot.
	legend     If True, plot legend and total surface charges

	No return value.
	"""
	dz = params.zres
	z = params.zvalues_nm()
	zint = params.interface_z_nm()
	fmt_opts = {'style': 'tex', 'fmt': '{:.2g}'}  # format for density values

	if not isinstance(densz, dict):
		raise TypeError("Argument densz must be a dict instance")
	densz_bg = densz.get('bg')
	densz_e = densz.get('e')
	densz_h = densz.get('h')
	densz_t = densz.get('total')
	# Treat a numerically negligible background density as absent
	if densz_bg is not None and np.amax(np.abs(densz_bg)) < 1e-15:
		densz_bg = None
	if densz_t is None and densz_e is not None and densz_h is not None:
		densz_t = densz_e + densz_h
	if densz_t is None and densz_e is None and densz_h is None:
		sys.stderr.write("ERROR (ploto.densz): Nothing to be plotted\n")
		return

	dim_e = densz_e.ndim if isinstance(densz_e, np.ndarray) else 0
	dim_h = densz_h.ndim if isinstance(densz_h, np.ndarray) else 0
	dim_t = densz_t.ndim if isinstance(densz_t, np.ndarray) else 0
	dim_bg = densz_bg.ndim if isinstance(densz_bg, np.ndarray) else 0
	dim = max([dim_e, dim_h, dim_t, dim_bg])
	if dim > 2:
		raise ValueError("Data arrays may not be of dimension > 2.")
	# Determine array size and number of plot pages. Note: the fallback chains
	# must include densz_h; otherwise the case where only the hole density is
	# given would query an attribute on None.
	if densz_t is not None:
		arr_size = densz_t.shape[-1]
	elif densz_e is not None:
		arr_size = densz_e.shape[-1]
	elif densz_h is not None:
		arr_size = densz_h.shape[-1]
	else:
		arr_size = densz_bg.shape[-1]
	npoints = densz_t.shape[0] if dim_t == 2 else densz_e.shape[0] if dim_e == 2 else densz_h.shape[0] if dim_h == 2 else densz_bg.shape[0] if dim_bg == 2 else 1

	# Determine maximum over the densities that are present. (Guard against
	# densz_t being None, which happens if only one of 'e', 'h' was given.)
	densmax = 0.0
	if densz_t is not None:
		densmax = max(densmax, np.amax(np.abs(densz_t)))
	if densz_e is not None:
		densmax = max(densmax, np.amax(np.abs(densz_e)))
	if densz_h is not None:
		densmax = max(densmax, np.amax(np.abs(densz_h)))
	# Background charge does not affect the plot scale, intentionally.

	# Determine unit automatically: smallest power of 1000 with densmax <= 30 * unit
	for e in range(-15, 4, 3):
		unit = 10**e
		if densmax <= 30 * unit:
			unitstr = "" if e == 0 else "10^{%i}\\;" % e
			break
	# Do not use units larger than 10^2; fall back to unit 1. This also covers
	# the case where the loop above completed without break (unitstr unset).
	if unit > 1e2:
		unit = 1
		unitstr = ""

	arr_shape = (npoints, arr_size)
	if densz_e is not None:
		densz_e = np.broadcast_to(densz_e, arr_shape)
	if densz_h is not None:
		densz_h = np.broadcast_to(densz_h, arr_shape)
	if densz_t is not None:
		densz_t = np.broadcast_to(densz_t, arr_shape)
	if densz_bg is not None:
		densz_bg = np.broadcast_to(densz_bg, arr_shape)

	pdfpages = PdfPages(filename) if filename is not None and filename != "" else None
	for j in range(0, npoints):
		# Create figure
		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)
		plt.plot([z.min(), z.max()], [0, 0], 'k--')

		## Plot
		allplots = []
		legendlabels = []
		# holes
		if densz_h is not None:
			thisplot, = plt.plot(z, densz_h[j] / unit, 'r-')
			allplots.append(thisplot)
			legendlabels.append("holes, $\\rho_\\mathrm{h}$")
		# total
		if densz_t is not None:
			thisplot, = plt.plot(z, densz_t[j] / unit, 'b-')
			allplots.append(thisplot)
			legendlabels.append("total, $\\rho$")
		# electrons
		if densz_e is not None:
			thisplot, = plt.plot(z, densz_e[j] / unit, 'g-')
			allplots.append(thisplot)
			legendlabels.append("electrons, $-\\rho_\\mathrm{e}$")
		# background
		if densz_bg is not None:
			thisplot, = plt.plot(z, densz_bg[j] / unit, 'm--')
			allplots.append(thisplot)
			legendlabels.append("backgr., $\\rho_\\mathrm{bg}$")

		# Determine integrals (surface densities); nan marks absent data
		int_dens_t = float('nan') if densz_t is None else np.sum(densz_t[j]) * dz
		int_dens_e = float('nan') if densz_e is None else np.sum(densz_e[j]) * dz
		int_dens_h = float('nan') if densz_h is None else np.sum(densz_h[j]) * dz
		int_dens_bg = float('nan') if densz_bg is None else np.sum(densz_bg[j]) * dz

		## Determine min and max
		ymin = -densmax / unit
		ymax = densmax / unit
		if ymax - ymin < 1e-6:
			ymin, ymax = -1e-3, 1e-3

		# Mark the internal interfaces with dotted vertical lines
		for zi in zint[1:-1]:
			plt.plot([zi, zi], [ymin, ymax], 'k:')
		plt.axis([z.min() * 1.05, z.max() * 1.05, ymin - 0.05 * (ymax - ymin), ymax + 0.05 * (ymax - ymin)])

		## Determine labels of y and x axis
		set_ylabel('$\\rho(z)$', '$%se/\\mathrm{nm}^3$' % unitstr)
		set_xlabel('$z$', '$\\mathrm{nm}$')
		set_ticks()

		## Plot legend and text (total surface charges)
		if legend:
			ax.legend(handles = allplots, labels = legendlabels, loc='upper right')
			if densz_e is not None:
				if abs(int_dens_e) < abs(int_dens_h) * 1e-3:
					dens_txt = "$n_\\mathrm{e} \\approx 0$"
				else:
					valstr = format_value(-int_dens_e, **fmt_opts).strip('$')
					dens_txt = "$n_\\mathrm{e} = %s\\; e/\\mathrm{nm}^{2}$" % valstr
				ax.text(0.02, 0.05, dens_txt, ha = 'left', va = 'center', transform = ax.transAxes)
			if densz_h is not None:
				if abs(int_dens_h) < abs(int_dens_e) * 1e-3:
					dens_txt = "$n_\\mathrm{h} \\approx 0$"
				else:
					valstr = format_value(int_dens_h, **fmt_opts).strip('$')
					dens_txt = "$n_\\mathrm{h} = %s\\; e/\\mathrm{nm}^{2}$" % valstr
				ax.text(0.02, 0.95, dens_txt, ha = 'left', va = 'center', transform = ax.transAxes)
			if densz_bg is not None:
				if abs(int_dens_bg) < max(abs(int_dens_e), abs(int_dens_h)) * 1e-3:
					dens_txt = "$n_\\mathrm{bg} \\approx 0$"
				else:
					valstr = format_value(int_dens_bg, **fmt_opts).strip('$')
					dens_txt = "$n_\\mathrm{bg} = %s\\; e/\\mathrm{nm}^{2}$" % valstr
				ax.text(0.02, 0.48, dens_txt, ha = 'left', va = 'top', transform = ax.transAxes)

			if densz_e is not None and densz_h is not None and abs(int_dens_t) < max(abs(int_dens_e), abs(int_dens_h)) * 1e-3:
				dens_txt = "$n \\approx 0$"
			elif densz_t is not None:
				dens_txt = "$n = %s\\; e/\\mathrm{nm}^{2}$" % format_value(int_dens_t, **fmt_opts)
			else:
				dens_txt = None
			if dens_txt is not None:
				ax.text(0.02, 0.52, dens_txt, ha = 'left', va = 'bottom', transform = ax.transAxes)

		if (title is not None) and (title != ""):
			if isinstance(title_val, (list, np.ndarray)):
				title_str = title % title_val[j]
			elif isinstance(title_val, (tuple, int, float, np.integer, np.floating)):
				title_str = title % title_val
			else:
				title_str = title
			ax.text(0.5, 0.98, title_str, ha='center', va='top', transform=ax.transAxes)

		if pdfpages is not None:
			pdfpages.savefig(fig)
		elif filename is not None and filename != "":
			# Single-file fallback; unreachable as long as pdfpages is created
			# for every nonempty filename, kept for safety.
			plt.savefig(filename.replace(".pdf", "-%i.pdf" % (j+1)))
		# If no filename was given, do not attempt to save (previously this
		# would crash on filename.replace() / savefig("")).
		plt.close()
	if pdfpages is not None:
		pdfpages.close()

	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/kdotpy.mplstyle b/kdotpy-v1.0.0/src/kdotpy/ploto/kdotpy.mplstyle
new file mode 100644
index 0000000000000000000000000000000000000000..1b9f69640d8a51500aaf3eb145b0fb0eec54183b
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/kdotpy.mplstyle
@@ -0,0 +1,14 @@
+## Matplotlib style file for kdotpy
+
+## Put the axes, ticks, etc. on top of the plot data.
+axes.axisbelow       : False
+
+## Ticks settings: inward on all four axes
+xtick.top            : True
+xtick.bottom         : True
+xtick.direction      : in
+ytick.left           : True
+ytick.right          : True
+ytick.direction      : in
+
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/tools.py b/kdotpy-v1.0.0/src/kdotpy/ploto/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..71fff30d5d6459e1564f6131951a7bb0bdc0cd1f
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/tools.py
@@ -0,0 +1,1185 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import os.path
+import sys
+import shutil
+import re
+import warnings
+
+import matplotlib as mpl
+mpl.use('pdf')
+import matplotlib.style as mplstyle
+import matplotlib.pyplot as plt
+from matplotlib import rcParams, rc_file
+from matplotlib.collections import LineCollection
+
+from ..config import get_config, get_config_num, get_config_int, configpath
+from .colortools import color_auto_range, color_interpolation, indexed_color_auto_range, intermediate_colors, try_colormap
+from .toolstext import obs_latex
+from ..momentum import Vector
+from ..observables import all_observables
+from ..etransform import ETransform
+from ..bandtools import overlap_eivec_labels, is_bandpair, subband_labels_from_obsids
+
# File name of the matplotlib style file bundled with kdotpy
default_mplstyle = 'kdotpy.mplstyle'
# Directory containing this module; used to locate the bundled style file
scriptpath = os.path.dirname(os.path.realpath(__file__))

### PLOT PARAMETERS ###

# Default parameters for kdotpy. These values cannot be overridden by a
# matplotlibrc file. Because these values are forced, we are very conservative
# here: We only set a few parameters that, if set otherwise, would negatively
# affect plot quality or be a nuisance.
default_matplotlibrc = {
	# presumably suppresses the 'too many open figures' warning — see matplotlib docs
	'figure.max_open_warning': -1,
	# do not simplify rendered paths, so plotted data is drawn exactly
	'path.simplify': False
}
+
def load_matplotlibrc(filename):
	"""Load a matplotlibrc file; count and report parser warnings.

	Arguments:
	filename   Path of the matplotlibrc file to load with matplotlib's
	           rc_file().

	Returns:
	True if the file was loaded without warnings, False otherwise.

	Raises:
	Re-raises any exception from rc_file() after writing an error message.
	"""
	nwarn = 0
	with warnings.catch_warnings(record = True) as w:
		# Record all warnings so they can be counted below. (The previous
		# simplefilter('error') escalated warnings to exceptions, which made
		# the warning count below always zero.)
		warnings.simplefilter('always')
		try:
			rc_file(filename)
		except Exception:
			sys.stderr.write("ERROR (load_matplotlibrc): Parsing error on loading '%s' as matplotlibrc file.\n" % filename)
			raise
		nwarn = len(w)
	if nwarn > 0:
		sys.stderr.write("Warning (load_matplotlibrc): Parsing '%s' as matplotlibrc has generated %i warnings. Check whether this file is a valid matplotlibrc file.\n" % (filename, nwarn))
	return nwarn == 0
+
def initialize_matplotlib_style(style=None):
	"""Apply matplotlib style from argument or configuration value

	We also copy the default style file from the script directory to the
	configuration directory. If this file already exists in the configuration
	directory, do not overwrite it, even if it has been edited.

	Arguments:
	style   Style name or path of a style file. If None, take the value of the
	        configuration option 'fig_matplotlib_style'.

	Returns:
	The style (file path or name) that was applied, or None if none was.
	"""
	source_mplstyle_file = os.path.join(scriptpath, default_mplstyle)
	default_mplstyle_file = os.path.join(configpath, default_mplstyle)
	if not os.path.isfile(source_mplstyle_file):
		raise OSError("Built-in matplotlib style file does not exist")
	if not os.path.isfile(default_mplstyle_file):
		shutil.copy(source_mplstyle_file, default_mplstyle_file)
		sys.stderr.write(f"Info (initialize_matplotlib_style): Default matplotlib style file '{default_mplstyle}' created in {configpath}.\n")

	if style is None:
		style = get_config('fig_matplotlib_style')
	if not style:
		# Covers both an empty string and a None configuration value; the
		# latter previously crashed in os.path.isfile().
		return None
	elif os.path.isfile(style):
		pass
	elif os.path.isfile(os.path.join(configpath, style)):
		style = os.path.join(configpath, style)
	elif style in mplstyle.available:
		pass
	else:
		# Message previously named the wrong function ('apply_matplotlib_style')
		sys.stderr.write(f"ERROR (initialize_matplotlib_style): Style '{style}' is neither an existing matplotlib style file, nor a predefined style.\n")
		return None
	if 'verbose' in sys.argv:
		print(f"Using matplotlib style '{style}'.")
	mplstyle.use(style)
	return style
+
def initialize_matplotlibrc():
	"""Load (custom) matplotlibrc file and forcibly set default values

	Returns:
	Path of the matplotlibrc file that is in effect.
	"""
	fname_local = 'matplotlibrc'
	fname_config = os.path.join(configpath, 'matplotlibrc')
	if os.path.isfile(fname_local):
		# A matplotlibrc in the current working directory has already been
		# picked up by matplotlib itself; just point out that style files are
		# the recommended mechanism.
		sys.stderr.write("Warning (init_matplotlibrc): Found matplotlibrc in the current working directory. For customizing plots, it is recommended to use matplotlib style files instead.\n")
		fname = fname_local
	elif os.path.isfile(fname_config):
		# A matplotlibrc in the configuration directory: reset to matplotlib
		# defaults, then load it, and warn about the mechanism.
		sys.stderr.write(f"Warning (init_matplotlibrc): Found matplotlibrc in configuration directory ({configpath}). For customizing plots, it is recommended to use matplotlib style files instead.\n")
		mpl.rcdefaults()
		load_matplotlibrc(fname_config)
		fname = fname_config
	else:
		# Neither location applies: matplotlib has already loaded its
		# matplotlibrc from a predefined location, see
		# https://matplotlib.org/stable/users/explain/customizing.html#customizing-with-matplotlibrc-files
		fname = mpl.matplotlib_fname()
	if 'verbose' in sys.argv:
		print(f"Using matplotlibrc file at {fname}")

	# Forcibly update the rc parameters with the kdotpy defaults; see comment
	# about default_matplotlibrc above. TODO: Is it OK to do this silently?
	rcParams.update(default_matplotlibrc)
	return fname
+
def initialize():
	"""Wrapper for initialize_matplotlibrc() and initialize_matplotlib_style()"""
	# Keep this order: load the rc file first, then apply the style on top.
	return initialize_matplotlibrc(), initialize_matplotlib_style()
+
def get_plot_size(which, inches = True, legend = False):
	"""Get (default) plot properties, taken from configuration file.

	Arguments:
	which   Label of the plot property
	inches  If True, give length in inches; if False, give length in mm
	legend  Boolean value that indicates whether the plot contains a colorbar
	        legend.

	Returns:
	A length in inches (or in mm if inches is False); a dimensionless ratio
	for the relative margins ('left'/'right'/'bottom'/'top'); a pair of
	lengths for 'figsize'; or a dict of subplot parameters for 'subplot'.
	"""

	# Figure dimensions
	if which in ['hsize', 'sh', 'h']:
		val = get_config_num('fig_hsize')
		# Fall back to 150 mm if the configured value is missing or non-positive
		valmm = 150. if val is None or val <= 0.0 else val
		if legend and get_legend_method() == 'extend':
			# Widen the figure to make room for an attached colorbar
			valmm += get_config_num('fig_colorbar_space')
			valmm -= get_plot_size('mr', False)
	elif which in ['vsize', 'sv', 'v']:
		val = get_config_num('fig_vsize')
		valmm = 100. if val is None or val <= 0.0 else val
	elif which in ['figsize', 'size', 's']:
		return get_plot_size('h', inches = inches, legend = legend), get_plot_size('v', inches = inches, legend = legend)
	# Inner plot dimensions (figure size minus the margins)
	elif which in ['plotwidth', 'pw']:
		valmm = get_plot_size('h', False, legend = legend) - get_plot_size('ml', False) - get_plot_size('mr', False)
	elif which in ['plotheight', 'ph']:
		valmm = get_plot_size('v', False, legend = legend) - get_plot_size('mt', False) - get_plot_size('mb', False)
	# Margins (absolute)
	elif which in ['lmargin', 'ml']:
		val = get_config_num('fig_lmargin')
		valmm = 20. if val is None else val
	elif which in ['rmargin', 'mr']:
		val = get_config_num('fig_rmargin')
		valmm = 4. if val is None else val
	elif which in ['bmargin', 'mb']:
		val = get_config_num('fig_bmargin')
		valmm = 12. if val is None else val
	elif which in ['tmargin', 'mt']:
		val = get_config_num('fig_tmargin')
		valmm = 3. if val is None else val
	# Margins (relative)
	# These return dimensionless fractions of the figure size and therefore
	# bypass the mm/inch conversion at the bottom of this function.
	elif which in ['left', 'rl']:
		return get_plot_size('ml') / get_plot_size('h', legend = legend)
	elif which in ['right', 'rr']:
		return 1.0 - get_plot_size('mr') / get_plot_size('h', legend = legend)
	elif which in ['bottom', 'rb']:
		return get_plot_size('mb') / get_plot_size('v', legend = legend)
	elif which in ['top', 'rt']:
		return 1.0 - get_plot_size('mt') / get_plot_size('v', legend = legend)
	# For subplot_adjust
	elif which in ['sub', 'subplot']:
		return {'left': get_plot_size('ml') / get_plot_size('h', legend = legend), 'right': 1.0 - get_plot_size('mr') / get_plot_size('h', legend = legend), 'bottom': get_plot_size('mb') / get_plot_size('v', legend = legend), 'top': 1.0 - get_plot_size('mt') / get_plot_size('v', legend = legend), 'wspace': 0.0, 'hspace': 0.0}
	# Colorbar parameters
	# NOTE(review): unlike the branches above, these have no fallback if the
	# configuration value is None — confirm get_config_num cannot return None here.
	elif which in ['wcb', 'colorbar_size']:
		valmm = get_config_num('fig_colorbar_size')
	elif which in ['scb', 'colorbar_space']:
		valmm = get_config_num('fig_colorbar_space')
	elif which in ['mcb', 'colorbar_margin']:
		valmm = get_config_num('fig_colorbar_margin')
	else:
		raise ValueError("Illegal value for argument 'which'.")
	# Convert mm to inches if requested (1 inch = 25.4 mm)
	return valmm / 25.4 if inches else valmm
+
+
_legend_method_warning_shown = False
def get_legend_method(key = 'fig_colorbar_method'):
	"""Get the legend method (from config file)"""
	global _legend_method_warning_shown
	method = get_config(key)
	if not isinstance(method, str):
		# Non-string configuration values are returned unchanged
		return method
	method = method.lower()
	if method in ['insert', 'extend', 'file']:
		return method
	# Invalid value: warn only once, then fall back to 'insert'
	if not _legend_method_warning_shown:
		sys.stderr.write("Warning (get_legend_method): Invalid legend method (configuration option '%s')\n" % key)
		_legend_method_warning_shown = True
	return 'insert'
+
+
_quiver_opts_warning_shown = False
def get_quiver_opts(arrowscale = None):
	"""Get configuration options for arrow plots [with plt.quiver()]

	Argument:
	arrowscale   Arrow length in mm for a vector of length 0.5. If None, take
	             the configuration option 'fig_spin_arrow_length'.

	Returns:
	A dict of keyword arguments for matplotlib.pyplot.quiver().
	"""
	global _quiver_opts_warning_shown
	if arrowscale is None:
		arrowscale = get_config_num('fig_spin_arrow_length')
	if arrowscale is None or arrowscale <= 0.0:
		# Warn only once about an invalid value, then use the default of 5 mm
		if not _quiver_opts_warning_shown:
			sys.stderr.write("Warning (get_quiver_opts): Spin arrow length must be a positive number.\n")
			_quiver_opts_warning_shown = True
		arrowscale = 5.0
	opts = {'units': 'inches', 'pivot': 'tail', 'scale_units': 'inches'}
	opts['scale'] = 12.7 / arrowscale  # 12.7 = 25.4 * 0.5 (mm per 0.5 inch)
	opts['width'] = 0.4 / 25.4  # 0.4 mm, converted to inches
	opts['headwidth'] = 3
	opts['headlength'] = 4
	opts['headaxislength'] = 3.5
	return opts
+
+
fignum = 0  # reset
def get_fignum():
	"""Get new figure number (global counter)

	Increments the module-level counter, but always returns None so that
	matplotlib assigns its own figure numbers (safe for multiprocessing on
	Windows).
	"""
	global fignum
	fignum = fignum + 1
	return None
+
def get_default_fontsize(rcparam):
	"""Get default font size from an rcparam instance

	Arguments:
	rcparam   String. Key of the matplotlib rc parameter to inspect.

	Returns:
	Font size in points. Falls back to the normal font size ('font.size', or
	10.0 if that is unavailable) if the value cannot be interpreted.
	"""
	try:
		basesize = float(rcParams['font.size'])
	except (KeyError, TypeError, ValueError):
		# Narrowed from a bare except: do not swallow KeyboardInterrupt etc.
		sys.stderr.write("Warning (default_fontsize): Normal font size not defined.\n")
		basesize = 10.0
	if rcparam not in rcParams:
		sys.stderr.write("Warning (default_fontsize): Label '%s' does not point to a valid matplotlib rc parameter.\n" % rcparam)
		return basesize
	rcsize = rcParams[rcparam]
	# Relative size keywords in increasing order; 'medium' is the base size
	sizes = ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large']
	try:
		size = float(rcsize)
	except (TypeError, ValueError):
		if rcsize == 'larger':
			size = basesize * 1.2
		elif rcsize == 'smaller':
			size = basesize / 1.2
		elif rcsize in sizes:
			# Each step in the size list corresponds to a factor of 1.2
			size_exp = sizes.index(rcsize) - 3
			size = basesize * 1.2**size_exp
		else:
			sys.stderr.write("Warning (default_fontsize): Value '%s' is not a valid font size.\n" % rcsize)
			size = basesize
	return size
+
+### OBSERVABLE HANDLING ###
+
def process_plot_obs(obsids, obs):
	"""Determine color function and LaTeX string of the given observable

	Arguments:
	obsids   List of valid observable ids
	obs      The observable id for which to return colour data and observable
	         string.

	Returns:
	color    Colour data. This could be a string, or a list of the form
	         [colortype (string), obsid, ..., obsid, param, ..., param]. See
	         also information for function data_colors()
	obsstr   String form of observable

	Note:
	The return value may be None, None if the function fails.
	"""
	color = None
	obsstr = None
	if obs in all_observables:
		if '[' not in obs:  # exclude indexed observables (effective, but not elegant)
			obs = all_observables[obs].obsid  # deal with 'alias' observable ids
	# Special case (handling 'berryz' <--> 'berry' alias):
	if obs == 'berryz' and 'berry' in obsids:
		obs = 'berry'

	# For dual observables (of the form 'obs1.obs2')
	obsdot = obs.split('.') if len(obs) >= 3 and '.' in obs else None
	if obsdot is not None and len(obsdot) > 2:
		sys.stderr.write("Warning (process_plot_obs): Observable id with more than one dot (.) is not valid.\n")
		return None, None

	if "sigma" in obs:
		obs1 = obs.split("sigma")[1]
		obs2 = obs1 + "2"
		if obs1 in obsids and obs2 in obsids:
			minmax = color_auto_range(obs1)
			color = ["sigma", obs1, obs2, 0.0, minmax[1]]
			obsstr = obs_latex(obs)
	elif obs.startswith("ipr") and obs in obsids:
		minmax = color_auto_range(obs)
		cmap = try_colormap(get_config('color_ipr'))
		color = ["colormap", obs, minmax[0], minmax[1], cmap]
		obsstr = obs_latex(obs)
	elif obs == "orbitalrgb" and "gamma6" in obsids and "gamma8l" in obsids and "gamma8h" in obsids:
		color = ["RGB", "gamma6", "gamma8l", "gamma8h"]
		obsstr = [r"$\Gamma_{6}$", r"$\Gamma_{8,\mathrm{LH}}$", r"$\Gamma_{8,\mathrm{HH}}$", r"$\Gamma_{7}$"]
		# obsstr = r"$(\langle P_{\Gamma_{6}}\rangle,\langle P_{\Gamma_{8};\mathrm{LH}}\rangle,\langle P_{\Gamma_{8};\mathrm{HH}}\rangle)$"
	elif obs == "subbandrgb" and "E1+" in obsids and "E1-" in obsids and "H1+" in obsids and "H1-" in obsids and "H2+" in obsids and "H2-" in obsids:
		color = ["RGB", "E1+", "E1-", "H1+", "H1-", "H2+", "H2-"]
		obsstr = ["E1", "H1", "H2"]
		# obsstr = r"$(\langle P_{\mathrm{E}1}\rangle,\langle P_{\mathrm{H}1}\rangle,\langle P_{\mathrm{H}2}\rangle)$"
	elif obs.startswith("subband"):
		subbands_labels = overlap_eivec_labels(obs)
		available_bands = subband_labels_from_obsids(obsids)
		nsl = len(subbands_labels)
		if nsl == 0:
			sys.stderr.write("Warning (process_plot_obs): No valid subbands specified as part of 'subband' observable.\n")
			return None, None
		ov_labels = ["(%+i)" % lb if isinstance(lb, tuple) and len(lb) == 1 else str(lb) for lb in subbands_labels]
		if any([o not in obsids for o in ov_labels]):
			if len(available_bands) >= 1:
				sys.stderr.write("Warning (process_plot_obs): No data available for the requested subband(s), or duplicate subbands requested.\n")
				sys.stderr.write("Available subbands for coloring: " + ", ".join(available_bands) + ".\n")
			else:
				sys.stderr.write("Warning (process_plot_obs): No subbands available for coloring. Have you included 'overlaps' as command-line argument?\n")
			return None, None
		# Check whether the requested subbands come in (+/-) pairs
		pairs = False
		if nsl % 2 == 0:
			pairs = True
			for jj in range(0, nsl, 2):
				pairs &= is_bandpair(subbands_labels[jj], subbands_labels[jj+1])
		obsstr = []
		if pairs:
			for jj in range(0, nsl, 2):
				lb1, lb2 = subbands_labels[jj], subbands_labels[jj+1]
				if isinstance(lb1, tuple) and isinstance(lb2, tuple):
					obsstr.append("%+i,%+i" % (lb1[0], lb2[0]))
				else:
					obsstr.append(lb1[:-1])
		else:
			sys.stderr.write("Warning (process_plot_obs): For colouring, requested subbands should come in pairs.\n")  # TODO: Allow non-paired subbands
			return None, None

		color = ["RGB"] if len(obsstr) == 3 else ["mix"]
		color.extend(ov_labels)
	elif obs == "llindex" or (obs in ["llavg", "llbymax"] and obs in obsids):
		minmax = color_auto_range(obs)
		if minmax is not None:
			color = ["indexed", obs, minmax[0], minmax[1]]
		else:
			color = ["indexed", obs, -2.5, 17.5]
		obsstr = (r"LL $n$", "") if obs_latex(obs) is None else obs_latex(obs)
	elif obsdot is not None and obsdot[0].startswith("ll") and obsdot[1] in ["jz", "sz", "isopz"] and (obsdot[0] == 'llindex' or obsdot[0] in obsids) and obsdot[1] in obsids:
		color = ["indexedpm", obsdot[0], obsdot[1], -2.5, 7.5]
		obsstr = (r"LL $n$", "") if obs_latex(obs) is None else obs_latex(obs)
	elif obs == "bindex":
		cmap = try_colormap(get_config('color_bindex'))
		lower, upper = indexed_color_auto_range(cmap, default=20)
		color = ["indexed", "bindex", lower, upper]
		obsstr = r"band $i$", ""
	elif obsdot is not None and obsdot[0] == "bindex" and obsdot[1] in ["jz", "sz", "isopz"] and obsdot[1] in obsids:
		color = ["indexedpm", obsdot[0], obsdot[1], -4.5, 5.5]
		obsstr = r"band $i$", ""
	elif obsdot is not None and obsdot[1] in ["jz", "sz", "isopz"] and obsdot[0] in obsids and obsdot[1] in obsids:
		minmax1 = color_auto_range(obsdot[0])
		color = ["shadedpm", obsdot[0], obsdot[1], minmax1[0], minmax1[1]]
		obsstr = obs_latex(obs)
	elif obsdot is not None and obsdot[1] in ["jz", "sz", "isopz"] and obsdot[0].startswith("abs") and len(obsdot[0]) > 3 and obsdot[0][3:] in obsids and obsdot[1] in obsids:
		minmax1 = color_auto_range(obsdot[0][3:])
		color = ["shadedpmabs", obsdot[0][3:], obsdot[1], 0.0, max(abs(minmax1[0]), abs(minmax1[1]))]
		obsstr = obs_latex(obs)
	elif obs in obsids:
		minmax = color_auto_range(obs)
		if minmax == [-1.5, 1.5, -1.0, 1.0]:
			cmap = try_colormap(get_config('color_threehalves'))
			color = ["colormap", obs, minmax[0], minmax[1], cmap]
		else:
			color = ["obs", obs] + minmax
		obsstr = obs_latex(obs)
	elif obs is not None and obs != "":
		sys.stderr.write("Warning (process_plot_obs): Observable \'%s\' not available.\n" % obs)
		# Copy the list before extending it: previously, available_obsids was
		# an alias of obsids, so the appends below mutated the caller's list
		# while it was being iterated over.
		available_obsids = list(obsids)
		for o in obsids:
			if len(o) > 0 and o[-1] != '2' and o + '2' in obsids:
				available_obsids.append("sigma"+o)
		if "gamma6" in obsids and "gamma8l" in obsids and "gamma8h" in obsids:
			available_obsids.append("orbitalrgb")
		if "E1+" in obsids and "E1-" in obsids and "H1+" in obsids and "H1-" in obsids and "H2+" in obsids and "H2-" in obsids:
			available_obsids.append("subbandrgb")
		sys.stderr.write("Available observables: " + ", ".join(available_obsids) + "\n")
	return color, obsstr
+
+### PLOT DATA ###
+
def log10_clip(arr, minval, maxval):
	"""Clip array values between 10^minval and 10^maxval and return log10(data)

	Arguments:
	arr     Array of input values.
	minval  Lower limit, as exponent of 10. Values below 10**minval map to
	        minval in the output.
	maxval  Upper limit, as exponent of 10.

	Returns:
	Float array of the same shape as arr with log10 of the clipped values.
	"""
	# Force a float output array: np.full_like without dtype inherits the
	# dtype of arr, and an integer output cannot receive np.log10 results.
	out = np.full_like(arr, float(minval), dtype = float)
	out = np.log10(arr, where = (arr >= 10**minval), out = out)
	return np.clip(out, minval, maxval)
+
def log10_scale(arr, minval, maxval):
	"""Position of values on a logarithmic scale between 10^minval and 10^maxval

	The result is 0 at 10**minval and 1 at 10**maxval.
	"""
	span = maxval - minval
	return (log10_clip(arr, minval, maxval) - minval) / span
+
def get_transitions_deltaemax(data, qty = 'rate', qmin = None, qmax = None):
	"""Get value of maximum Delta E from transitions data and configuration value

	Arguments:
	data         Iterable of data points; each element may be None or carry a
	             transitions attribute (with n, get_values(), delta_e()).
	qty          Quantity used for weighting the transitions.
	qmin, qmax   Logarithmic colour-scale limits. If either is None, determine
	             both with get_transitions_log_limits().

	Returns:
	emax   Maximum Delta E (energy axis limit).
	eres   Energy resolution associated with emax.
	"""
	deltaemax = get_config_num('transitions_max_deltae', minval = 0.0)
	if qmin is None or qmax is None:
		qmin, qmax = get_transitions_log_limits(data, qty = qty)

	# Counters for automatic determination of vertical limit; other initialization
	# The outcome of this method is ignored if deltaemax is specified
	elimits = [1.0, 2.0, 3.0, 5.0, 8.0, 10.0, 20.0, 30.0, 50.0, 80.0, 100.0, 150.0]
	eress = [0.01, 0.01, 0.01, 0.02, 0.02, 0.05, 0.1, 0.1, 0.2, 0.2, 0.2, 0.5]
	counts = [0.0 for e in elimits]
	totalcount = 0.0
	# Automatic determination of vertical limit for transitions
	if deltaemax is None or deltaemax == 0.0:
		# Accumulate, per candidate limit, the log-weighted amplitude of all
		# transitions with Delta E below that limit
		for d in data:
			if d is None or d.transitions is None or d.transitions.n == 0:
				continue
			td = d.transitions  # shortcut
			amp = td.get_values(qty)
			q = -qmin + log10_clip(amp, qmin, qmax)
			totalcount += np.sum(q)
			for j, el in enumerate(elimits):
				counts[j] += np.sum(q[td.delta_e() <= el])
		# Choose the smallest limit that contains at least 80% of the total
		# weight; fall back to the largest limit if none qualifies
		emax = None
		for c, el in zip(counts, elimits):
			if c >= 0.8 * totalcount:
				emax = el
				break
		if emax is None:
			emax = elimits[-1]
	else:
		emax = deltaemax
	# Automatic determination of resolution
	# Take the resolution paired with the largest elimits entry <= emax
	eres = 0.01
	for el, er in zip(elimits, eress):
		if el <= emax:
			eres = er
		else:
			break

	return emax, eres
+
def get_transitions_quantity(key = 'plot_transitions_quantity'):
	"""Get the quantity used for colouring in the transitions plot."""
	valid = ['deltae', 'delta_e', 'freq', 'lambda', 'wavelength', 'occupancy', 'amplitude', 'rate', 'ratedensity', 'rate_density', 'absorption']
	qty = get_config(key, choices = valid)
	if qty is None:
		# Invalid configuration value: warn and use the default
		sys.stderr.write("Warning (get_transitions_quantity): Invalid configuration key '%s' for transition plot quantity. Set to default value 'rate'.\n" % key)
		return 'rate'
	qty = qty.lower()
	# Map shorthand names to their canonical quantity ids
	aliases = {'freq': 'freq_thz', 'lambda': 'lambda_um', 'wavelength': 'lambda_um'}
	return aliases.get(qty, qty)
+
def get_transitions_log_limits(data, qty = 'rate'):
	"""Determine the limits for the colour scale in the transitions plot.

	Arguments:
	data    Data values
	qty     The quantity that is used for colouring.

	Returns:
	qmin, qmax   Lower and upper limits."""
	qmax = 1e-4
	for d in data:
		if d is None or d.transitions is None or d.transitions.n == 0:
			continue
		td = d.transitions  # shortcut
		amp = td.get_values(qty)
		# Consider only transitions with sufficient amplitude density and energy
		sel = (td.amp_density() >= 1.0) & (td.delta_e() >= 1.0)
		if np.count_nonzero(sel) > 0:
			qmax = max(qmax, np.amax(amp[sel]))
	log_qmax = np.ceil(np.log10(qmax))
	# Clamp the upper limit into a range appropriate for the quantity
	if qty in ['deltae', 'delta_e', 'freq', 'freqthz', 'freq_thz', 'lambda', 'wavelength', 'lambdaum', 'lambda_um']:
		return -1, max(min(log_qmax, 3), -1)
	if qty in ['occupancy', 'absorption']:
		return -3, max(min(log_qmax, 1), -2)
	if qty in ['amplitude', 'rate', 'ratedensity', 'rate_density']:
		return -2, max(min(log_qmax, 9), 1)
	return 0, 6
+
def spin_markers(spinval, v1 = 0.25, v2 = 1.0):
	"""Get spin markers from data.

	Show different markers depending on value s.
	Domains: s <= -v2; -v2 < s <= -v1; -v1 < s < v1; v1 <= s < v2; v2 <= s

	Arguments:
	spinval   Single number, list, or array containing spin (Sz or Jz) value(s)
	v1, v2    Limits; see list of domains above

	Returns:
	Marker (one-character string), list of markers or array of markers,
	following the type of argument spinval.
	"""
	def marker(s):
		# Map a single spin value onto its marker character
		if s <= -v2:
			return 'v'
		if s <= -v1:
			return '1'
		if s < v1:
			return '+'
		if s < v2:
			return '2'
		return '^'
	if isinstance(spinval, list):
		return [marker(s) for s in spinval]
	if isinstance(spinval, np.ndarray):
		return np.array([marker(s) for s in spinval])
	return marker(spinval)
+
+class SpinArrows:
+	"""Container class for vector field data, to prepare it for a quiver plot.
+
+	Attributes:
+	u, v         Horizontal and vertical component of the vector field.
+	unit_marker  If True, all arrows will be normalized to the same length (but
+	             u and v are not modified).
+	maxlen       Upper bound for arrow length.
+	"""
+	def __init__(self, u, v = None, scale = 1.0, maxlen = None, unit_marker = False):
+		"""Initialize from separate u, v arrays or one combined array.
+
+		Arguments:
+		u, v         If v is given, u and v are the two components; both are
+		             divided by scale. If v is None, u must be an array of
+		             shape (2, n) or (n, 2) holding both components.
+		             NOTE(review): scale is not applied in the combined-array
+		             branch (v is None) -- confirm whether this is intended.
+		scale        Divisor applied to u and v when given separately.
+		maxlen       Upper bound for arrow length; see max_length().
+		unit_marker  If True, get_uv() returns arrows of uniform length 0.5.
+		"""
+		if v is None:
+			uv = np.asarray(u)
+			if uv.ndim != 2:
+				raise ValueError("Input must be two-dimensional")
+			if uv.shape[1] == 2:
+				# Accept (n, 2) input by transposing it to (2, n).
+				uv = uv.transpose()
+			if uv.shape[0] != 2:
+				raise ValueError("Input must have shape (2,n) or (n,2)")
+			self.u, self.v = uv[0], uv[1]
+		else:
+			self.u = np.asarray(u) / scale
+			self.v = np.asarray(v) / scale
+		if self.u.shape != self.v.shape:
+			raise ValueError("Arrow u and v must be of the same length")
+		self.unit_marker = unit_marker
+		self.maxlen = maxlen
+
+	def unit_length(self, l = 1.0):
+		"""Get normalized arrows.
+
+		Argument:
+		l  Length (default value 1)
+
+		Returns:
+		normalized_u, normalized_v
+		"""
+		length_sq = self.u**2 + self.v**2
+		# Use the 'where' argument so that zero-length arrows stay zero
+		# instead of producing a division-by-zero warning.
+		length_inv = np.zeros_like(self.u, dtype = float)
+		length_inv = np.reciprocal(np.sqrt(length_sq), out = length_inv, where = (length_sq != 0))
+		return self.u * length_inv * l, self.v * length_inv * l
+
+	def max_length(self, lmax = None):
+		"""Scale all arrows to a specified maximum length.
+
+		Argument:
+		lmax   Maximum length. If None, use self.maxlen. If that is also None
+		       or not positive, return the arrows unscaled.
+
+		Returns:
+		u, v   Components, where arrows longer than lmax are scaled to lmax.
+		"""
+		if lmax is None:
+			lmax = self.maxlen
+		if lmax is None or lmax <= 0.0:
+			return (self.u, self.v)
+		length = np.sqrt(self.u**2 + self.v**2)
+		# Scale factor is 1 for short arrows, length / lmax for long ones.
+		scale = np.maximum(length / lmax, 1.0)
+		return (self.u / scale, self.v / scale)
+
+	def get_uv(self):
+		"""Get vector field. Use appropriate scaling as it was set at construction."""
+		return self.unit_length(l = 0.5) if self.unit_marker else self.max_length() if self.maxlen is not None else (self.u, self.v)
+
+	def plot(self, xval, yval, rmin = 0.0, polar = False, max_arrows = None, color = None, **plot_kwds):
+		"""Generate a quiver (vector-field) plot.
+
+		Arguments:
+		xval        x coordinates of the arrow bases
+		yval        y coordinates of the arrow bases
+		rmin        Radius, below which no arrows are shown. Useful for polar
+		            plots, where arrows bunch up near zero radius.
+		polar       If True, create a polar plot. If False, create a cartesian
+		            plot.
+		max_arrows  An integer that indicates approximately how many arrows will
+		            be shown along each direction. If None, plot arrows at all
+		            specified coordinates.
+		color       The arrow colour.
+		**plot_kwds Additional plot options that will be passed on to
+		            matplotlib.pyplot.quiver()
+
+		No return value."""
+		if not isinstance(xval, np.ndarray) or not isinstance(yval, np.ndarray):
+			raise TypeError("Arguments 'xval' and 'yval' must be numpy arrays.")
+		if not xval.shape == yval.shape:
+			raise ValueError("Arguments 'xval' and 'yval' must be arrays of identical shapes.")
+		# Fall back to the configuration values if not set explicitly.
+		if max_arrows is None:
+			max_arrows = get_config_int('fig_max_arrows', minval = 1)
+		if color is None:
+			color = get_config('fig_arrow_color_2d')
+
+		u, v = self.get_uv()
+		if xval.ndim == 2:
+			xvalf, yvalf = xval.flatten(), yval.flatten()
+			if len(xvalf) != len(u):
+				raise ValueError("Lengths of arrays 'xval' and 'yval' does not match length of vector marker data.")
+
+			# Thin out the grid so that roughly max_arrows arrows remain
+			# along each of the two axes.
+			if max_arrows is not None and max_arrows > 0:
+				x_div = max((xval.shape[0] - 1) // max_arrows, 1)
+				y_div = max((xval.shape[1] - 1) // max_arrows, 1)
+				sel1 = np.zeros(xval.shape, dtype = bool)
+				sel1[::x_div, ::y_div] = True
+				sel1 = sel1.flatten()
+			else:
+				sel1 = True
+			if polar:
+				# In a polar plot, xvalf is the radius and yvalf the angle.
+				# Show all arrows with radius >= 4 rmin; between rmin and
+				# 4 rmin, show arrows only at (near) multiples of 45 degrees.
+				sel2a = (xvalf >= 4 * rmin)
+				sel2b = (xvalf < 4 * rmin) & (xvalf >= rmin) & ((np.mod(np.degrees(yvalf), 45.0) < 1e-6) | (45.0 - np.mod(np.degrees(yvalf), 45.0) < 1e-6))
+				sel = sel1 & (sel2a | sel2b)
+				plt.quiver(yvalf[sel], xvalf[sel], u[sel], v[sel], color = color, **{**get_quiver_opts(), **plot_kwds})
+			else:
+				# Cartesian plot: suppress arrows inside the square |x|,|y| < rmin.
+				sel2 = (np.maximum(np.abs(xvalf), np.abs(yvalf)) >= rmin)
+				sel = sel1 & sel2
+				plt.quiver(xvalf[sel], yvalf[sel], u[sel], v[sel], color = color, **{**get_quiver_opts(), **plot_kwds})
+		else:
+			raise NotImplementedError("Not (yet) implemented for dimensions other than two.")
+
+def get_vector_obs(mode):
+	"""Get observables for vector field (quiver) plot from plot mode.
+
+	Argument:
+	mode    Plot mode (string)
+
+	Returns:
+	obs_u, obs_v   Observables for horizontal and vertical component of the
+	               vector field.
+	"""
+	if mode is None:
+		return None
+	match = re.fullmatch(r'(spin|berry)(xy|xz|yz)1?', mode)
+	if match is None:
+		return None
+	obs0 = 's' if match.group(1) == 'spin' else match.group(1)
+	return obs0 + match.group(2)[0], obs0 + match.group(2)[1]
+
+def get_observable_vector_scale(obs):
+	"""Get preset scale from vector field observable."""
+	scale = 0.0
+	if isinstance(obs, str):
+		obs = [obs]
+	for o in obs:
+		if o in all_observables:
+			scale = max(scale, max([abs(val) for val in all_observables[o].minmax]))
+	return 0.5 if scale == 0.0 else scale / 0.5  # factor 0.5 so that spins scale to 1
+
+def get_levels(zmin0, zmax0, thicknesses = [0.5, 1.5]):
+	"""Get energy levels etc. for contour plot.
+
+	Arguments:
+	zmin0, zmax0   Minimum and maximum input values
+	thicknesses    2-element list containing line widths of contours (thin and
+	               thick, respectively)
+
+	Returns:
+	elevelsf       All levels, including those that will not be drawn
+	elevels        Only the levels that will be drawn
+	ethickness     Line widths of the levels that will be drawn
+	elevelfmt      Format string for energies
+
+	NOTE: There is a distinction between elevelsf and elevels (i.e., with and
+	without levels that will not be drawn). We need to take care of this because
+	of an incompatibility between matplotlib v2.2.2 and v2.0.0. Actually, for the
+	older version it is not necessary to distinguish. We can also use elevelsf for
+	the plot colours.
+
+	NOTE(review): zmax0 must be strictly larger than zmin0; for zmax0 <= zmin0
+	the while loop below does not terminate. Confirm that callers guarantee
+	a nondegenerate range.
+	"""
+	zmin = zmin0
+	zmax = zmax0
+	zdelta = 1.0
+	# For each range size, round zmin and zmax outward to a multiple of the
+	# chosen major level spacing (emajor) and fix the number of minor
+	# subdivisions per major interval (eminorsubdiv).
+	if zmax - zmin <= 1.0:
+		# Small range: first find a power of ten zdelta below the range size.
+		while zmax - zmin <= zdelta:
+			zdelta /= 10
+		if (zmax - zmin) / zdelta <= 1.5:
+			zmin, zmax = zdelta * 0.5 * np.floor(zmin / zdelta / 0.5), zdelta * 0.5 * np.ceil(zmax / zdelta / 0.5)
+			emajor, eminorsubdiv = 0.5 * zdelta, 5
+		elif (zmax - zmin) / zdelta <= 3.0:
+			zmin, zmax = zdelta * 1.0 * np.floor(zmin / zdelta / 1.0), zdelta * 1.0 * np.ceil(zmax / zdelta / 1.0)
+			emajor, eminorsubdiv = 1.0 * zdelta, 5
+		elif (zmax - zmin) / zdelta <= 6.0:
+			zmin, zmax = zdelta * 2.0 * np.floor(zmin / zdelta / 2.0), zdelta * 2.0 * np.ceil(zmax / zdelta / 2.0)
+			emajor, eminorsubdiv = 2.0 * zdelta, 4
+		else:
+			zmin, zmax = zdelta * 5.0 * np.floor(zmin / zdelta / 5.0), zdelta * 5.0 * np.ceil(zmax / zdelta / 5.0)
+			emajor, eminorsubdiv = 5.0 * zdelta, 5
+	elif zmax - zmin <= 3.0:
+		zmin, zmax = 1.0 * np.floor(zmin / 1.0), 1.0 * np.ceil(zmax / 1.0)
+		emajor, eminorsubdiv = 1.0, 5
+	elif zmax - zmin <= 6.0:
+		zmin, zmax = 2.0 * np.floor(zmin / 2.0), 2.0 * np.ceil(zmax / 2.0)
+		emajor, eminorsubdiv = 2.0, 4
+	elif zmax - zmin <= 15.0:
+		zmin, zmax = 5.0 * np.floor(zmin / 5.0), 5.0 * np.ceil(zmax / 5.0)
+		emajor, eminorsubdiv = 5.0, 5
+	elif zmax - zmin <= 30.0:
+		zmin, zmax = 10.0 * np.floor(zmin / 10.0), 10.0 * np.ceil(zmax / 10.0)
+		emajor, eminorsubdiv = 10.0, 5
+	elif zmax - zmin <= 60.0:
+		zmin, zmax = 20.0 * np.floor(zmin / 20.0), 20.0 * np.ceil(zmax / 20.0)
+		emajor, eminorsubdiv = 20.0, 4
+	elif zmax - zmin <= 150.0:
+		zmin, zmax = 50.0 * np.floor(zmin / 50.0), 50.0 * np.ceil(zmax / 50.0)
+		emajor, eminorsubdiv = 50.0, 5
+	else:
+		zmin, zmax = 100.0 * np.floor(zmin / 100.0), 100.0 * np.ceil(zmax / 100.0)
+		emajor, eminorsubdiv = 100.0, 5
+	# print (zmin, zmax, emajor, eminorsubdiv)
+	# Number of decimals needed to represent the level values exactly.
+	decimals = max(2, 1-int(np.floor(1e-3 + np.log10(emajor))))
+	# All levels at spacing emajor / eminorsubdiv over the rounded range.
+	elevelsf = np.around(np.linspace(zmin, zmax, 1 + int(round(eminorsubdiv * (zmax - zmin) / emajor))), decimals = decimals)
+	elevelfmt = '$%i$' if emajor >= 1.0 else '$%%.%if$' % (decimals - 1)
+	# print (elevelsf)
+	# Every eminorsubdiv-th level is a major level and gets the thick width.
+	ethicknessf = thicknesses[0] * np.ones_like(elevelsf)
+	ethicknessf[::eminorsubdiv] = thicknesses[1]
+
+	# Restrict to levels inside the original (unrounded) input range.
+	ethickness = ethicknessf[(elevelsf >= zmin0) & (elevelsf <= zmax0)]
+	elevels = elevelsf[(elevelsf >= zmin0) & (elevelsf <= zmax0)]
+	# print (zip(elevels, ethickness))
+	return elevelsf, elevels, ethickness, elevelfmt
+
+
+# Setting for plot_data_series: If set to True, then do not plot incomplete data
+# sets, i.e., those containing NaN values. Otherwise, ignore this restriction.
+# (Module-level switch, read by plot_data_series below.)
+nan_strict = False
+
+def plot_data_series(xval, yval, axis = None, fig = None, colors = None, markers = None, zorder = None, yrange = None, transform = None):
+	"""Plotting data series (scatter or line plot) (main function)
+	Each call to this function will typically yield one matplotlib collection
+	object. This is faster than drawing points or lines one-by-one.
+
+	Arguments:
+	xval, yval   Arrays with the x and y coordinates. These must be of the same
+	             size. A single number is interpreted as an array of length 1.
+	             The value(s) must be numeric; Vector instances are not
+	             permitted.
+	axis         matplotlib axis instance in which to draw the data; if None,
+	             use the current axis.
+	fig          matplotlib figure instance in which to draw the data; if None,
+	             use the current figure.
+	colors       Colours of the data points. The value can be None (default
+	             colour), a matplotlib colour character (e.g., 'b' for blue), or
+	             an RGB or RGBA tuple (array or tuple of length 3 or 4).
+	             Anything that the color keyword in the matplotlib plot
+	             functions accepts is permitted. If one such value is specified,
+	             then apply the colour to all data points identically. If an
+	             array of such values with the same length as xval and yval
+	             is given, then the data points are drawn with different
+	             colours.
+	markers      Data markers. The value can be None or a matplotlib marker
+	             string. This may also be a line '-', dashed line '--', or
+	             dotted line ':'. This may be a single value (all data points
+	             get identical markers) or an array of values with the same
+	             length as xval and yval (data points get different markers).
+	zorder       Sets the zorder parameter that determines the stacking order of
+	             the plot elements. See matplotlib documentation.
+	yrange       None or a 2-tuple of numbers or None. If all y values of the
+	             data set lie outside this range, do not plot anything. This is
+	             useful to reduce file size and rendering time of the image.
+	transform    ETransform instance. If set, apply a transformation to the y
+	             values.
+
+	Returns:
+	matplotlib figure instance
+	"""
+	# Resolve figure and axis; default to the current ones.
+	if fig is None:
+		fig = plt.gcf()
+	else:
+		fig = plt.figure(fig)
+	if axis is None:
+		axis = plt.gca()
+
+	# Handle x and y values
+	if isinstance(xval, Vector):
+		raise TypeError("plot_data_series does not take Vectors as arguments")
+	# Broadcast a scalar x against an array y (and vice versa); wrap two
+	# scalars into length-1 lists, so the code below can assume sequences.
+	if isinstance(xval, (float, np.floating, int, np.integer)) and isinstance(yval, (list, np.ndarray)):
+		xval = [xval for _ in yval]
+	elif isinstance(yval, (float, np.floating, int, np.integer)) and isinstance(xval, (list, np.ndarray)):
+		yval = [yval for _ in xval]
+	elif isinstance(xval, (float, np.floating, int, np.integer)) and isinstance(yval, (float, np.floating, int, np.integer)):
+		xval = [xval]
+		yval = [yval]
+	if len(xval) == 0 or len(yval) == 0:
+		return
+	if len(xval) != len(yval):
+		raise ValueError("Input arrays should have equal lengths")
+	xval = np.asarray(xval)
+	yval = np.asarray(yval)
+	if isinstance(xval[0], Vector):
+		raise TypeError("plot_data_series does not take Vectors as arguments")
+
+	# If the module-level switch nan_strict is set, refuse data with NaNs.
+	if nan_strict and (np.isnan(xval).sum() != 0 or np.isnan(yval).sum() != 0):
+		sys.stderr.write("Warning (plot_data_series): Incomplete data series cannot be plotted (NaN values).\n")
+		return
+	# TODO: Handle properly, e.g., by splitting into multiple pieces (and/or
+	# removing the NaN values. It may also be possible to ignore this
+	# restriction at all
+
+	# Apply a transformation to the data
+	if isinstance(transform, ETransform):
+		if len(xval) > 1 and np.amax(np.abs(np.diff(xval))) < 1e-6:
+			# If all x values are equal, pass it as a single value and turn the
+			# y values into an array with first axis of length 1.
+			yval = transform.apply([yval], xval[0])
+		else:
+			yval = transform.apply(yval, xval)
+	elif transform is not None:
+		raise TypeError("Argument transform must be an ETransform instance")
+
+	if len(xval) == 0 or len(yval) == 0:
+		return
+
+	# Handle range
+	if yrange is not None:
+		ymin, ymax = tuple(yrange)
+		if transform is not None:
+			ymin, ymax = transform.min(ymin), transform.max(ymax)
+	else:
+		ymin, ymax = yval.min(), yval.max()
+	ymin, ymax = 1.1 * ymin - 0.1 * ymax, -0.1 * ymin + 1.1 * ymax  # extend slightly
+
+	# Handle colors
+	if colors is None:
+		colors = get_config('plot_dispersion_default_color')
+	if isinstance(colors, np.ndarray):
+		# An ndarray is expected to hold one RGB triplet per data point.
+		if colors.shape != (len(xval), 3):
+			raise ValueError
+		colors = [tuple(c) for c in colors]
+	if isinstance(colors, list) and len(colors) > 0:
+		same_colors = True
+		## For determining if colors are uniform (all the same), do not consider
+		## data points with NaN values as coordinates.
+		nanval = (np.isnan(xval) | np.isnan(yval))
+		visible_colors = [c for nan, c in zip(nanval, colors) if not nan]
+		if len(visible_colors) == 0:
+			colors = (1.0, 1.0, 1.0)  # If nothing is visible, use white.
+		else:
+			for c in visible_colors:
+				if c != visible_colors[0]:
+					same_colors = False
+					break
+			if same_colors:
+				# Collapse a uniform colour list to a single colour value.
+				colors = visible_colors[0]
+
+	# Handle markers
+	smallmarkers = ['.', 'x', '+']
+	if markers is None:
+		markers = 'o'
+	# Collapse a uniform marker list to a single marker value.
+	if isinstance(markers, (list, np.ndarray)) and len(markers) > 0:
+		same_markers = True
+		for m in markers:
+			if m != markers[0]:
+				same_markers = False
+				break
+		if same_markers:
+			markers = markers[0]
+	if isinstance(markers, (list, np.ndarray)):
+		mew = rcParams['lines.markeredgewidth']
+		markersize = rcParams['lines.markersize']
+	else:
+		# Small markers keep the matplotlib defaults; other markers are drawn
+		# slightly smaller and without a marker edge.
+		mew = 0.0 if markers not in smallmarkers else rcParams['lines.markeredgewidth']
+		markersize = 4.0 if markers not in smallmarkers else rcParams['lines.markersize']
+
+	# NOTE(review): The branches below draw partly via the axis argument and
+	# partly via plt.* functions, which target the *current* axis; confirm
+	# that axis is always the current axis when this function is called.
+	if len(xval) == 1:
+		# Single data point: draw either a marker or a (degenerate) line.
+		ls_val = markers if isinstance(markers, str) and markers in ['-', '--', ':'] else 'None'
+		marker_val = markers if isinstance(markers, str) and markers not in ['-', '--', ':'] else 'None'
+		axis.plot(xval, yval, color = colors, ls = ls_val, marker = marker_val, mew = mew, markersize = markersize, zorder = zorder)
+	elif isinstance(markers, str) and markers not in ['-', '--', ':']:
+		# Scatter plot with a single marker type.
+		zorder = 1 if zorder is None else zorder
+		if isinstance(colors, (str, tuple)):
+			if yrange is not None:
+				sel = (yval >= ymin) & (yval <= ymax)
+				plt.plot(xval[sel], yval[sel], color = colors, linestyle = 'None', marker = markers, mew = mew, markersize = markersize, zorder = zorder)
+			else:
+				plt.plot(xval, yval, color = colors, linestyle = 'None', marker = markers, mew = mew, markersize = markersize, zorder = zorder)
+		elif isinstance(colors, (list, np.ndarray)):
+			if len(colors) != len(xval):
+				raise ValueError
+			if yrange is not None:
+				sel = (yval >= ymin) & (yval <= ymax)
+				plt.scatter(xval[sel], yval[sel], marker = markers, c = np.asarray(colors)[sel], linewidths = mew, s = markersize**2, zorder = zorder)
+			else:
+				plt.scatter(xval, yval, marker = markers, c = np.asarray(colors), linewidths = mew, s = markersize**2, zorder = zorder)
+		else:
+			raise TypeError
+	elif isinstance(markers, str) and markers in ['-', '--', ':']:
+		# Line plot (solid, dashed, or dotted).
+		zorder = 2 if zorder is None else zorder
+		if isinstance(colors, (str, tuple)):
+			axis.plot(xval, yval, color = colors, linestyle = markers, marker = 'None', zorder = zorder)
+		elif isinstance(colors, (list, np.ndarray)):
+			# Per-segment colours: n points give n - 1 segments. If one colour
+			# per point is given, interpolate to per-segment colours.
+			if len(colors) == len(xval):
+				colors = intermediate_colors(colors)
+			elif len(colors) == len(xval) - 1:
+				pass
+			else:
+				raise ValueError("Color array has incorrect length")
+			if yrange is not None and (yval.min() > ymax or yval.max() < ymin):
+				return fig
+
+			# Build a LineCollection from individual segments; this is much
+			# faster than drawing the segments one by one.
+			xy = np.vstack((xval, yval)).T
+			xy = xy.reshape(-1, 1, 2)
+			segments = np.hstack([xy[:-1], xy[1:]])
+			sel = np.all(~np.isnan(segments), axis=(1, 2))  # segments not containing NaN
+			if np.count_nonzero(sel) == 0:
+				return fig
+			if isinstance(colors, list):
+				colors = [c for c, s in zip(colors, sel) if s]
+			elif isinstance(colors, np.ndarray):
+				colors = colors[sel]
+			linestyle = 'solid' if markers == '-' else 'dashed' if markers == '--' else 'dotted'
+			coll = LineCollection(segments[sel], colors = colors, linestyles = linestyle, zorder = zorder)
+			axis.add_collection(coll)
+			return fig
+		else:
+			raise TypeError
+	elif isinstance(markers, (list, np.ndarray)):
+		# Per-point markers: matplotlib cannot mix markers in one call, so
+		# draw one scatter plot per distinct marker.
+		zorder = 1 if zorder is None else zorder
+		all_markers = set(list(markers))
+		markers = np.asarray(markers)
+		# group by marker
+		for m in all_markers:
+			x = xval[markers == m]
+			y = yval[markers == m]
+			if isinstance(colors, str):
+				col = colors
+			elif isinstance(colors, tuple):
+				col = np.asarray([colors])
+			elif isinstance(colors, (list, np.ndarray)):
+				col = np.asarray(colors)[markers == m]
+			else:
+				raise TypeError("Invalid type for variable colors")
+			if yrange is not None:
+				sel = (y >= ymin) & (y <= ymax)
+				if isinstance(col, np.ndarray) and len(col) > 1:
+					col = col[sel]
+				plt.scatter(x[sel], y[sel], marker = m, c = col, linewidths = mew, s = markersize**2, zorder = zorder)
+			else:
+				plt.scatter(x, y, marker = m, c = col, linewidths = mew, s = markersize**2, zorder = zorder)
+	elif isinstance(markers, SpinArrows):
+		# Vector-field (quiver) plot with dots at the arrow bases; the arrows
+		# are drawn just below the dots (zorder - 1).
+		quiveropts = get_quiver_opts()
+		zorder = 0 if zorder is None else zorder
+		u, v = markers.get_uv()
+		if isinstance(colors, (str, tuple)):
+			if yrange is not None:
+				sel = (yval >= ymin) & (yval <= ymax)
+				plt.quiver(xval[sel], yval[sel], u[sel], v[sel], color = colors, zorder = zorder - 1, **quiveropts)
+				plt.plot(xval[sel], yval[sel], color = colors, linestyle = 'None', marker = 'o', mew = mew, markersize = markersize, zorder = zorder)
+			else:
+				plt.quiver(xval, yval, u, v, color = colors, zorder = zorder - 1, **quiveropts)
+				plt.plot(xval, yval, color = colors, linestyle = 'None', marker = 'o', mew = mew, markersize = markersize, zorder = zorder)
+		elif isinstance(colors, (list, np.ndarray)):
+			if len(colors) != len(xval):
+				raise ValueError
+			if yrange is not None:
+				sel = (yval >= ymin) & (yval <= ymax)
+				plt.quiver(xval[sel], yval[sel], u[sel], v[sel], color = np.asarray(colors)[sel], zorder = zorder - 1, **quiveropts)
+				plt.scatter(xval[sel], yval[sel], marker = 'o', c = np.asarray(colors)[sel], linewidths = mew, s = markersize**2, zorder = zorder)
+			else:
+				plt.quiver(xval, yval, u, v, color = np.asarray(colors), zorder = zorder - 1, **quiveropts)
+				plt.scatter(xval, yval, marker = 'o', c = np.asarray(colors), linewidths = mew, s = markersize**2, zorder = zorder)
+		else:
+			raise TypeError
+	else:
+		raise TypeError
+	return fig
+
+# TeX labels for the special energies drawn by plot_energies. The combined
+# keys 'mu,mu0' and 'ef,ef0' label pairs of almost-equal energies.
+energies_tex = {
+	'ef': r"$E_\mathrm{F}$",
+	'ef0': r"$E_\mathrm{F,0}$",
+	'mu': r"$\mu$",
+	'mu0': r"$\mu_0$",
+	'e0': r"$E_0$",
+	'mu,mu0': r"$\mu\approx\mu_0$",
+	'ef,ef0': r"$E_\mathrm{F}\approx E_{\mathrm{F},0}$"
+}
+
+def plot_energies(energies, xval = None, yval = None, acc = 1.0, text = True, transform = None):
+	"""Function for plotting special energies, like Fermi energy and chemical potential.
+	The function draws horizontal or vertical dashed lines with text labels in
+	in an existing plot. If a pair of the specified special energies is almost
+	equal, draw only one of them.
+
+	Arguments:
+	energies     A dict instance that contains the energies. Valid keys are:
+	             'ef', 'e0', 'mu', 'mu0'.
+	xval, yval   One of both must be numeric, the other None. Set the x value or
+	             or y value where the labels should be drawn. If the y axis is
+	             energy, set the x value; and vice versa.
+	acc          Numerical value denoting 'accuracy'. This is the maximum
+	             difference in meV for two energies to be considered 'almost
+	             equal'.
+	text         If True, show labels (text). If False, hide labels.
+	transform    ETransform instance. Apply this transformation to the energy
+	             axis.
+
+	No return value.
+	"""
+	if energies is None:
+		return
+	if xval is None and yval is None or xval is not None and yval is not None:
+		sys.stderr.write("ERROR (plot_energies): Either xval or yval must be set (exactly one of them).\n")
+		exit(1)
+	color = get_config('plot_dispersion_energies_color')
+	if color == '':
+		color = rcParams['lines.color']
+
+	energies1 = {e: energies[e] for e in energies if isinstance(energies[e], (float, int, np.floating, np.integer))}  # make a copy that can be manipulated
+	if isinstance(energies, float):
+		energies1['ef'] = {'ef': energies}
+	elif not isinstance(energies, dict):
+		sys.stderr.write("ERROR (plot_energies): Fermi energy argument must be None, a float, or a dict.\n")
+		exit(1)
+
+	# do not show e0 for if close to ef0 or ef (mu0 or mu)
+	ef0 = energies1['ef0'] if 'ef0' in energies1 else energies1.get('mu0')
+	ef = energies1['ef'] if 'ef' in energies1 else energies1.get('mu')
+	e0 = energies1.get('e0')
+
+	if ef is not None and e0 is not None and abs(ef - e0) < acc:
+		del energies1['e0']
+	if ef0 is not None and ef is not None and abs(ef0 - ef) < acc:
+		if 'ef0' in energies1:
+			del energies1['ef0']
+		elif 'mu0' in energies1:
+			del energies1['mu0']
+
+	# do not show mu and mu0 both, if they are close
+	if 'mu0' in energies1 and 'mu' in energies1 and abs(ef0 - ef) < acc:
+		energies1['mu,mu0'] = energies1['mu']
+		del energies1['mu']
+		del energies1['mu0']
+	# do not show ef and ef0 both, if they are close
+	if 'ef0' in energies1 and 'ef' in energies1 and abs(ef0 - ef) < acc:
+		energies1['ef,ef0'] = energies1['ef']
+		del energies1['ef0']
+		del energies1['ef']
+
+	ax = plt.gca()
+	for ee in energies1:
+		val = energies1[ee]
+		if val is None:
+			continue
+		point_at_zero = (ee == 'e0')
+		e_txt = energies_tex.get(ee, '')
+
+		if isinstance(transform, ETransform):
+			val = transform.apply(val)
+
+		if yval is None:
+			if point_at_zero:
+				plt.plot(0.0, val, '+', color=color)
+				txtx = 0.0
+			elif isinstance(xval, float):
+				plt.plot(xval, val, '+', color=color)
+				txtx = xval
+			else:
+				plt.plot(xval, [val for _ in xval], '--', color=color)
+				txtx = ax.transData.inverted().transform(ax.transAxes.transform((0.98, 0.0)))[0]
+			if text:
+				ax.text(txtx, val, e_txt, ha = 'right', va = 'bottom')
+		elif xval is None:
+			if point_at_zero:
+				plt.plot(val, 0.0, '+', color=color)
+				txty = 0.0
+			elif isinstance(yval, float):
+				plt.plot(val, yval, '+', color=color)
+				txty = yval
+			else:
+				plt.plot([val for _ in yval], yval, '--', color=color)
+				txty = ax.transData.inverted().transform(ax.transAxes.transform((0.0, 0.98)))[1]
+			if text:
+				ax.text(val, txty, e_txt, ha = 'left', va = 'top')
+	return
+
+def select_quadrant(phi, q, degrees = False):
+	"""Helper function for imshow_polar. It is necessary that each quadrant is plotted separately.
+	If the selected values do not align with a multiple of pi / 2 or 90	degrees
+	at the lower / upper bound, extend the bound by selecting one more value.
+	"""
+
+	phi1 = phi * np.pi / 180. if degrees else phi
+	if q == 0:
+		raise ValueError("Quadrant 0 is not a valid input")
+	if q > 0:
+		q -= 1
+	phimin = q * np.pi / 2.
+	phimax = (q + 1) * np.pi / 2.
+	phi_s = (phi1 >= phimin) & (phi1 <= phimax)
+	if np.count_nonzero(phi_s) == 0:
+		return phi_s
+	if np.amin(phi1[phi_s]) - phimin > 1e-9:  # If multiple of pi / 2 (90 deg) is not included ...
+		if np.count_nonzero(phi1 < phimin) > 0:
+			phimin = np.amax(phi1[phi1 < phimin])  # ... add one more value.
+	if np.amax(phi1[phi_s]) - phimax < -1e-9:  # If multiple of pi / 2 (90 deg) is not included ...
+		if np.count_nonzero(phi1 > phimax) > 0:
+			phimax = np.amin(phi1[phi1 > phimax])  # ... add one more value.
+	return (phi1 >= phimin) & (phi1 <= phimax)
+
+def imshow_polar(Phi, R, C, axis = None, interpolation = None, **kwds):
+	"""Wrapper function for matplotlib's imshow for polar plots.
+	In principle, matplotlib's imshow can show data in polar coordinates, but
+	the data needs to be 'regularized' in order to prevent graphical glitches.
+	The main two roles of this function are to divide the data into quadrants,
+	which are plotted separately, and to interpolate the colour data in order to
+	improve plot quality.
+
+	Arguments:
+	Phi, R     Array of angular and radial coordinates, respectively.
+	C          Data. This is an array of colours (typically RGB triplets) whose
+	           dimensions correspond to the sizes of Phi and R.
+	axis       matplotlib axis instance in which to draw the data; if None, use
+	           the current axis.
+	interpolation  Interpolation type. See matplotlib documentation for imshow
+	               for permitted values. If None, do not interpolate.
+
+	No return value.
+	"""
+	if axis is None:
+		axis = plt.gca()
+
+	# Accept 1-dim coordinate arrays as well as 2-dim meshgrid-style arrays.
+	r = R if R.ndim == 1 else R[:, 0]
+	phi = Phi if Phi.ndim == 1 else Phi[0]
+	# NOTE(review): assumes len(r) >= 2; a single radius raises IndexError here.
+	dr = abs(r[1] - r[0])
+	if dr == 0:
+		raise ValueError("Array indexing in incorrect order")
+	# Refined radial grid onto which the colours are interpolated.
+	r_new = np.linspace(r.min() + 0.25 * dr, r.max() - 0.25 * dr, (len(r) - 1) * 2)
+	rval = r[r >= 0.0]
+	if len(rval) <= 1:
+		sys.stderr.write("Warning (imshow_polar): There must be more than one non-negative radius.\n")
+		return
+
+	# Iterate over quadrants
+	for q in range(0, 4):
+		phi_p = select_quadrant(phi, q + 1)  # 0 to 360 degrees
+		phi_m = select_quadrant(phi, q - 4)  # -360 to 0 degrees
+		if np.count_nonzero(phi_p) > 1:
+			if np.count_nonzero(phi_m) > 1:
+				sys.stderr.write("Warning (imshow_polar): Data in each quadrant must be continuous, not equivalent modulo 2 pi (360 degrees).\n")
+			phi_s = phi_p
+			phimin, phimax = q * np.pi / 2., (q + 1) * np.pi / 2.
+		elif np.count_nonzero(phi_m) > 1:
+			phi_s = phi_m
+			phimin, phimax = (q - 4) * np.pi / 2., (q - 3) * np.pi / 2.
+		else:
+			# No data in this quadrant, neither in the positive nor in the
+			# negative angular branch.
+			continue
+
+		# Choose the refined angular grid based on the input angular spacing.
+		phival = phi[phi_s]
+		if phival[1] - phival[0] > np.pi / 12.1:  # >= 15 degrees approximately
+			phi_new = np.linspace(phimin + np.pi / 72., phimax - np.pi / 72., 18)  # 5 degree steps
+		elif phival[1] - phival[0] > np.pi / 61.:  # >= 3 degrees approximately
+			phi_new = np.linspace(phimin + np.pi / 360., phimax - np.pi / 360., 90)  # 1 degree steps
+		else:
+			phi_new = np.linspace(phimin + np.pi / 1080., phimax - np.pi / 1080., 270)  # 1/3 degree steps
+
+		# Interpolate the colours onto the refined grid, restricted to r >= 0.
+		C1 = color_interpolation(rval, phival, C[(r >= 0.0), :][:, phi_s], r_new, phi_new)
+		C1 = np.where(np.isnan(C1), np.ones_like(C1), C1)  # clear NaN values (set to 1 = white)
+
+		axis.imshow(np.clip(C1, 0, 1), extent = [phimin, phimax, max(0.0, min(r)), max(r)], interpolation = interpolation, origin = "lower", **kwds)
+	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/toolslegend.py b/kdotpy-v1.0.0/src/kdotpy/ploto/toolslegend.py
new file mode 100644
index 0000000000000000000000000000000000000000..7297f418f58da8c3016fa70d4b1cb9c60df275d0
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/toolslegend.py
@@ -0,0 +1,478 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+import matplotlib.colors as mplcolors
+from matplotlib.patches import Polygon, Patch
+
+from ..config import get_config_num, get_config_num_auto, get_config_int, get_config_bool
+from .tools import get_fignum, get_plot_size, get_default_fontsize, get_legend_method
+from .colortools import parse_obsrange, make_colorbar, make_dual_indexed_colorbar, make_dual_shaded_colorbar, make_transitions_colorbar, rgb_mix, get_colormap, get_colormap_from_config
+from .toolstext import format_legend_label
+
+
+### LEGENDS AND COLOR BARS ###
+# Observable ids that represent orbital character. Used by add_obs_legend()
+# below to decide that an RGB-type legend should be titled 'orbital character'.
+orbitalobs = ['gamma6', 'gamma8', 'gamma8l', 'gamma8h', 'gamma7']
+
+
def legend_extends(obs):
	"""Determine whether a colorbar is drawn that takes space outside the usual plot.

	Depending on the observable, return True if the legend is plotted outside
	the usual plot, e.g., for colorbars, which requires resizing either the
	plot or the whole figure. Return False if the legend is an inset, which
	does not consume extra space.

	Arguments:
	obs   String or None. The observable id.

	Returns:
	True or False.
	"""
	# No observable: treat as the 'extending' (colorbar) case.
	if obs is None:
		return True
	# Only the RGB-triangle ('orbitalrgb') and subband-type legends are insets.
	return obs != 'orbitalrgb' and not obs.startswith('subband')
+
+def get_legend_file(filename, insert = ".legend"):
+	"""Get the legend filename or None depending on the 'legend method' configuration setting.
+	If the legend method is 'file', return the filename for the legend based on
+	the filename of the 'parent' figure. Otherwise, return None, which indicates
+	that the legend should be drawn inside the existing figure.
+
+	Arguments:
+	filename   Name of the existing figure
+	insert     String to be inserted into the existing filename
+
+	Returns:
+	String or None. The filename for the separate legend file, or None if the
+	legend should be drawn inside the existing figure.
+	"""
+
+	if get_legend_method() == 'file' and filename != "" and filename is not None:
+		fname_spl = filename.split('.')
+		# Insert 'insert' just before the file extension; if the filename has
+		# no extension, simply append it.
+		return '.'.join(fname_spl[:-1]) + insert + "." + fname_spl[-1] if len(fname_spl) > 1 else filename + insert
+	else:
+		return None
+
+def add_obs_legend(color, normalized = False, obslabel = None, axis = None, narrow = False, filename = None, obsrange = None):
+	"""Add a legend for an observable (generic function).
+	Select the appropriate legend style (colorbar, rgb, or 'color patch' legend,
+	see below) based on the observable.
+
+	Arguments:
+	color       Colour type data; this may be None, a string, or a list of the
+	            form [colortype (string), obsid, ..., obsid, param, ..., param]
+	normalized  Whether a colour map is normalized. Only meaningful for RGB
+	            colour type.
+	obslabel    Observable text to be inserted into the legend.
+	axis        matplotlib axis instance in which to draw the colorbar; if None,
+	            use the current axis. If a colorbar is drawn, it may be drawn
+	            into a new axis instance inside the given axis.
+	narrow      For 'mix' colour type only: Whether to use a narrow legend
+	            (fewer, taller columns) as opposed to a wide legend (more,
+	            shorter columns)
+	filename    Filename of the plot to which the legend belongs. This is used
+	            only to determine the legend filename from, in case the 'legend
+	            method' configuration value is set to 'file'.
+	obsrange    Minimum and maximum value for a colorbar legend.
+
+	No return value.
+	"""
+
+	# None if the legend is drawn inside the figure; a filename otherwise
+	filename_leg = get_legend_file(filename)
+
+	# Format obslabel if it is str, 1-tuple, or 2-tuple
+	if isinstance(obslabel, str):
+		obslabel = format_legend_label(obslabel)
+	elif isinstance(obslabel, tuple) and len(obslabel) in [1, 2]:
+		obslabel = format_legend_label(*obslabel)
+
+	# Determine label automatically (not recommended)
+	if obslabel is None and isinstance(color, list) and len(color) >= 2 and isinstance(color[1], str):
+		obslabel = color[1]
+
+	# Using matplotlib colormap
+	if isinstance(color, list) and len(color) >= 5 and color[0] == "colormap":
+		omin, omax = parse_obsrange((color[2], color[3]), obsrange)
+		# NOTE(review): len(color) >= 5 holds here, so len(color) > 4 is always
+		# true and the else-branch color[4] is unreachable.
+		cmap = get_colormap(color[4:] if len(color) > 4 else color[4])
+		add_colorbar(omin, omax, axis = axis, label = obslabel, cmap = cmap, filename = filename_leg)
+
+	# Color-mapped from observable
+	elif isinstance(color, list) and len(color) in [4, 5] and color[0] == "obs":
+		omin, omax = parse_obsrange((color[2], color[3]), obsrange)
+		# With 5 entries, the last entry selects the colormap configuration key
+		config_key = 'color_symmobs' if len(color) == 4 else f'color_{color[4]}'
+		cmap = get_colormap_from_config(config_key)
+		add_colorbar(omin, omax, axis = axis, label = obslabel, cmap = cmap, filename = filename_leg)
+
+	# Color-mapped from observable (sigma)
+	elif isinstance(color, list) and len(color) == 5 and color[0] == "sigma":
+		# The colorbar runs from 0 to the larger of the two range bounds
+		omax = max(abs(color[3]), abs(color[4]))
+		_, omax = parse_obsrange((0.0, omax), obsrange)
+		cmap = get_colormap_from_config('color_sigma')
+		add_colorbar(0.0, omax, axis = axis, label = obslabel, cmap = cmap, filename = filename_leg)
+
+	# RGB color, triplet or triplet of pairs
+	elif isinstance(color, list) and len(color) in [4, 7] and color[0].startswith("RGB"):
+		# Determine the 'neutral colour' (the K of RGBK) from the colour type
+		# string: "RGB" -> not normalized, "RGBx" -> character x,
+		# "RGB;spec" -> spec; anything else -> None.
+		rgbk = not normalized if len(color[0]) == 3 else color[0][3] if len(color[0]) == 4 else color[0].split(';')[1] if color[0][3] == ';' else None
+		if color[1] in orbitalobs and color[2] in orbitalobs and color[3] in orbitalobs:
+			# obslabel = [r"$\Gamma_{6}$", r"$\Gamma_{8,\mathrm{LH}}$", r"$\Gamma_{8,\mathrm{HH}}$", r"$\Gamma_{7}$"]
+			add_rgblegend(axis, rgbk = rgbk, labels = obslabel, title = "orbital character", filename = filename_leg)
+		elif isinstance(color[1], str) and len(color[1]) >= 2 and color[1][0] in 'eEhHlL' and color[1][1] in '123456789':
+			# Subband-like observable ids, e.g. 'E1+', 'H2-'
+			# obslabel = [bt[:-1] for bt in color[1::2]]
+			add_rgblegend(axis, rgbk = rgbk, labels = obslabel, title = "subband overlap", filename = filename_leg)
+		else:
+			add_rgblegend(axis, rgbk = rgbk, labels = obslabel, title = "character", filename = filename_leg)
+
+	# color mix, number of pairs
+	elif isinstance(color, list) and len(color) >= 7 and (len(color) % 2) == 1 and color[0].startswith("mix"):
+		# Entries after color[0] come in (obsid, param) pairs
+		ncol = (len(color) - 1) // 2
+		add_mixcolor_legend(ncol, labels = obslabel, ncolumn = 1 if narrow else None, title='subband overlap', filename = filename_leg)
+
+	# indexed colors
+	elif isinstance(color, list) and len(color) == 4 and color[0] == "indexed":
+		cmin, cmax = color[2], color[3]
+		config_key = 'color_bindex' if color[1] == 'bindex' else 'color_indexed'
+		colormap = get_colormap_from_config(config_key)
+		# Integer ticks covering the value range
+		add_colorbar(cmin, cmax, cmap = colormap, label = obslabel, ticks = list(range(int(np.ceil(cmin)), int(np.floor(cmax)) + 1)), filename = filename_leg)
+
+	# dual indexed colors
+	elif isinstance(color, list) and len(color) == 5 and color[0] == "indexedpm":
+		cmin, cmax = color[3], color[4]
+		colormap = get_colormap_from_config('color_indexedpm')
+		add_colorbar(cmin, cmax, dual_indexed = True, cmap = colormap, label = obslabel, ticks = list(range(int(np.ceil(cmin)), int(np.floor(cmax))+1)), filename = filename_leg)
+
+	# dual shaded colors
+	elif isinstance(color, list) and len(color) == 5 and color[0].startswith("shadedpm"):
+		cmin, cmax = color[3], color[4]
+		colormap = get_colormap_from_config('color_shadedpm')
+		twosided = color[0].endswith('abs') and get_config_bool('fig_colorbar_abstwosided')
+		add_colorbar(cmin, cmax, dual_shaded = True, cmap = colormap, label = obslabel, filename = filename_leg, twosided = twosided)
+
+def add_mixcolor_legend(colors, labels = None, title = None, ncolumn = None, filename = None):
+	"""Add a "mixcolor" legend.
+	Display coloured patches (that represent the mixing components) and their
+	labels.
+
+	Arguments:
+	colors     List of colours or an integer that indicates the number of
+	           colours.
+	labels     The labels to show inside the legend.
+	title      The legend title.
+	ncolumn    If set, the number of columns; if None, determine this number
+	           automatically.
+	filename   If set, the file to write to; if None, insert the legend into the
+	           current figure.
+	Returns:
+	A matplotlib legend instance.
+	"""
+
+	if isinstance(colors, int):
+		if colors == 4:
+			# Fixed palette of red, yellow, green, blue
+			colors = [(1., 0., 0.), (1., 1., 0.), (0., 1., 0.), (0., 0., 1.)]
+		else:
+			# One pure mixing component per colour (unit vectors into rgb_mix)
+			nc = colors
+			colors = [rgb_mix(np.eye(nc)[jc], None) for jc in range(0, nc)]
+	legendpatches = [Patch(color=c) for c in colors]
+	if ncolumn is None:
+		# At least 2 columns; at most 3 rows per column
+		ncolumn = max(int(np.ceil(len(colors) / 3.0)), 2)
+	fontsize = get_config_num_auto('fig_legend_fontsize', minval = 5.0)
+	if fontsize is not None and fontsize > 14.0:
+		sys.stderr.write("Warning (add_mixcolor_legend): For font size (option 'fig_legend_fontsize'), values > 14 are not recommended.\n")
+	if fontsize is None:
+		fontsize = get_default_fontsize('legend.fontsize')
+	# Label colour: fall back to the generic text colour if not set
+	lc = rcParams['legend.labelcolor']
+	if lc is None or lc == "None":
+		lc = rcParams['text.color']
+
+	if filename is not None and filename != "":
+		# Remember current figure/axis so we can restore them below
+		fig = plt.gcf()
+		axis = plt.gca()
+		# TODO: Trim the figure to the correct size automatically. There is no
+		# simple solution to achieve this. We apply the following workaround:
+		# Set the figure size according to the number of columns and rows. The
+		# result may be poor if non-default values are being set in the
+		# matplotlibrc file (e.g., font size, legend padding, etc.)
+		hsize = ncolumn * (2.1 * fontsize) + 8.0
+		vsize = np.ceil(len(colors) / ncolumn) * 0.8 * fontsize + 6.0
+		fig_leg = plt.figure(get_fignum(), figsize = (hsize/25.4, vsize/25.4))
+		plt.axis("off")
+		legend = plt.legend(handles = legendpatches, labels = labels, loc='center', ncol = ncolumn, title = title, fontsize = fontsize)
+		legend.get_title().set_fontsize(fontsize)
+		legend.get_title().set_color(lc)
+		fig_leg.savefig(filename)
+
+		# return to original figure and axis
+		plt.figure(fig.number)
+		plt.sca(axis)
+	else:
+		# Insert the legend into the current figure
+		legend = plt.legend(handles = legendpatches, labels = labels, loc='upper right', ncol = ncolumn, title = title, fontsize = fontsize)
+		legend.get_title().set_fontsize(fontsize)
+		legend.get_title().set_color(lc)
+	return legend
+
+def add_colorbar(vmin, vmax, cmap = None, filename = None, axis = None, dual_indexed = False, dual_shaded = False, transitions = False, **kwds):
+	"""Make a colorbar and either insert it into the current figure or to a separate file
+	This function serves as a wrapper for make_colorbar(); see colortools.py
+
+	Arguments:
+	vmin, vmax    Minimum and maximum value of the colorbar variable.
+	cmap          The colormap (a colormap instance).
+	filename      Where to save the colorbar; if it is None, add to the current
+		          figure.
+	axis          The axis where the colorbar is inserted; if None, use the
+		          current axis.
+	dual_indexed  Make a 'dual indexed' colorbar, i.e., two columns of indexed
+	              colours.
+	dual_shaded   Make a 'dual shaded' colorbar, i.e., two shaded columns.
+	transitions   Make a 'transitions' colorbar; see make_transitions_colorbar()
+	              in colortools.py.
+	**kwds        Extra keyword arguments to be forwarded to make_colorbar().
+
+	Returns:
+	cb   A matplotlib object with the colorbar. Typically, a matplotlib axis
+	     instance.
+	"""
+	if axis is None:
+		axis = plt.gca()
+	fig = plt.gcf()
+	fontsize = get_config_num_auto('fig_legend_fontsize', minval = 5.0)
+	# NOTE(review): this condition also triggers for fontsize < 5.0, but the
+	# warning text only mentions values > 14 — confirm intended message.
+	if fontsize is not None and (fontsize < 5.0 or fontsize > 14.0):
+		sys.stderr.write("Warning (add_colorbar): For font size (option 'fig_legend_fontsize'), values > 14 are not recommended.\n")
+	if fontsize is None:
+		fontsize = get_default_fontsize('legend.fontsize')
+
+	# Set colorbar function; precedence: transitions, dual indexed, dual shaded, plain
+	colorbar_fn = make_transitions_colorbar if transitions else make_dual_indexed_colorbar if dual_indexed else make_dual_shaded_colorbar if dual_shaded else make_colorbar
+
+	if filename != "" and filename is not None:  # create new figure
+		# Geometry of the stand-alone colorbar figure, in plot-size units
+		cb_height = get_plot_size('v', inches = True)
+		cb_width = get_plot_size('scb', inches = True)
+		cb_total = 1.0
+		cb_margin = get_plot_size("mcb") / get_plot_size("scb", legend = True)
+		cb_fraction = cb_total - cb_margin
+		cb_aspect = get_plot_size("ph", legend = True) / get_plot_size("wcb")
+
+		figc = plt.figure(get_fignum(), figsize = (cb_width, cb_height))
+		plt.subplots_adjust(left = 0.0, right = 1.0, bottom = get_plot_size('mb') / get_plot_size('v', legend = True), top = 1.0 - get_plot_size('mt') / get_plot_size('v', legend = True), wspace = 0.0, hspace = 0.0)
+		# Invisible host axis; the colorbar is drawn into it
+		axisc = figc.add_subplot(1, 1, 1)
+		axisc.get_xaxis().set_visible(False)
+		axisc.get_yaxis().set_visible(False)
+		axisc.get_xaxis().set_ticks([])
+		axisc.get_yaxis().set_ticks([])
+		plt.axis('off')
+
+		cb = colorbar_fn(vmin, vmax, cmap = cmap, axis = axisc, fraction = cb_fraction, pad = cb_margin, aspect = cb_aspect, fontsize = fontsize, **kwds)
+
+		plt.savefig(filename)
+		# return to original figure and axis
+		plt.figure(fig.number)
+		plt.sca(axis)
+	else:  # add to original new figure
+		cb_total = (get_plot_size("scb") - get_plot_size("mr")) / get_plot_size("pw", legend = True)
+		cb_margin = get_plot_size("mcb") / get_plot_size("pw", legend = True)
+		cb_fraction = cb_total - cb_margin
+		cb_aspect = get_plot_size("ph", legend = True) / get_plot_size("wcb")
+		cb = colorbar_fn(vmin, vmax, cmap = cmap, axis = axis, fraction = cb_fraction, pad = cb_margin, aspect = cb_aspect, fontsize = fontsize, **kwds)
+
+	return cb
+
+def add_rgblegend(axis = None, fig = None, rgbk = False, labels = [None, None, None], title = None, filename = None):
+	"""RGB or RGBK legend; create an RGB triangle and add the appropriate labels.
+	This function creates the legend for observables 'orbitalrgb' and
+	'subbandrgb', for example.
+
+	Arguments:
+	axis        Parent axis object. For the legend, a new axis object is
+	            created. If None, the parent axis is the current axis. At
+	            completion, set the parent axis as the current axis.
+	fig         Current figure.
+	rgbk        If False, display RGB legend; If True, display RGBK legend. If
+	            rgbk is an RGB triplet or string (matplotlib compatible colour
+	            string), then that colour serves as the 'neutral' colour (the
+	            'k' in rgbk). The  default neutral colour (e.g., if rgbk is set
+	            to True) is black ('k').
+	labels      List of labels at the triangle vertices. The first four
+	            correspond to the R, G, and B channels and are displayed in the
+	            inner triangle for the RGBK legend. The fourth label is
+	            displayed at the vertices of the outer triangle (three copies).
+	title       Text to show above the legend.
+	filename    If given, save to this separate file; if None, then insert into
+	            the current figure.
+
+	Constants or configuration options:
+	resolution  Number of points along the edge of the triangle (inner triangle
+	            for RGBK).
+	margin      Margin size of the space between the legend and its bounding
+	            box.
+	fontsize    Point size of the displayed text.
+
+	Returns:
+	ax_in   The new matplotlib axis instance in which the legend is drawn.
+	"""
+	# NOTE(review): 'labels' has a mutable default ([None, None, None]); it is
+	# only read here, never mutated, so this is safe in practice. However,
+	# passing labels=None would raise a TypeError at the labels[2] lookups
+	# below — confirm that all callers pass a list or tuple.
+	# constants
+	resolution = get_config_int('fig_inset_color_resolution', minval = 4)
+	fontsize = get_config_num_auto('fig_legend_fontsize')
+	if fontsize is None:
+		fontsize = 8
+	elif fontsize > 14.0:
+		sys.stderr.write("Warning (add_rgblegend): For font size (option 'fig_legend_fontsize'), values > 14 are not recommended.\n")
+	# Enlarge the margin for font sizes above 8 pt
+	margin = 0.15 + 0.05 * (max(fontsize, 8) - 8)
+
+	# Calculate colour data
+	if rgbk:
+		# RGBK: larger grid covering the outer triangle, shifted down by deltay
+		xval = np.linspace(-0.55, 1.55, int(round(2.1 * resolution)) + 1)
+		deltay = 0.3
+	else:
+		xval = np.linspace(-0.05, 1.05, int(round(1.1 * resolution)) + 1)
+		deltay = 0
+	yval = xval - deltay
+	x, y = np.meshgrid(xval, yval)
+
+	# Linear map from Cartesian (x, y) to triangle coordinates (alpha, beta)
+	tfm = [[1.0, -np.sqrt(3.0) / 3.0], [0.0, np.sqrt(3.0) * 2.0 / 3.0]]
+
+	alpha = tfm[0][0] * x + tfm[0][1] * y
+	beta  = tfm[1][0] * x + tfm[1][1] * y
+
+	if rgbk:
+		zero = np.zeros_like(alpha)
+		# c1, c2, c3: the three regions of the outer triangle that lie beyond
+		# the edges of the inner triangle
+		c1 = (alpha + beta > 1.0) & (2 * alpha + beta >= 1) & (alpha + 2 * beta >= 1)
+		c2 = (alpha < 0.0) & (2 * alpha + beta < 1) & (alpha < beta)
+		c3 = (beta < 0.0) & (alpha >= beta) & (alpha + 2 * beta < 1)
+		rr = np.where(c1, 1 - beta,  np.where(c2, zero,         np.where(c3, alpha + beta, alpha)))
+		gg = np.where(c1, 1 - alpha, np.where(c2, alpha + beta, np.where(c3, zero,         beta)))
+		bb = np.where(c1, zero,      np.where(c2, 1 - beta,     np.where(c3, 1-alpha,      1 - alpha - beta)))
+		if isinstance(rgbk, str):
+			# Resolve matplotlib colour string to an RGBA tuple
+			rgbk = tuple(mplcolors.to_rgba(rgbk.lower()))
+		if isinstance(rgbk, (list, tuple, np.ndarray)) and len(rgbk) in [3, 4]:  # alpha channel allowed, but ignored
+			rem = np.clip(1.0 - (rr + gg + bb), 0.0, 1.0)  # 'remainder'
+			# mix in 'neutral' color
+			rr += rem * rgbk[0]
+			gg += rem * rgbk[1]
+			bb += rem * rgbk[2]
+	else:
+		rr = np.clip(alpha, 0.0, 1.0)
+		gg = np.clip(beta, 0.0, 1.0)
+		bb = np.clip(1 - alpha - beta, 0.0, 1.0)
+	rgb = np.dstack((rr, gg, bb))
+
+	if fig is None:
+		fig = plt.gcf()
+	if axis is None:
+		axis = plt.gca()
+
+	# Create new axis for inset
+	if filename is not None and filename != "":
+		# Separate file: a small square figure holding only the legend
+		in_size = get_config_num('fig_inset_size')
+		in_margin = get_config_num('fig_inset_margin')
+		hsize = (in_size + 2 * in_margin)
+		vsize = hsize
+		plt.figure(get_fignum(), figsize = (hsize / 25.4, vsize / 25.4))
+		in_left, in_right, in_bottom, in_top = hsize - in_margin - in_size, hsize - in_margin, vsize - in_margin - in_size, vsize - in_margin
+	else:
+		# Inset: place the legend in the top-right corner of the current plot
+		hsize = get_plot_size('h', inches = False)
+		vsize = get_plot_size('v', inches = False)
+		rmargin = get_plot_size('mr', inches = False)
+		tmargin = get_plot_size('mt', inches = False)
+		in_size = get_config_num('fig_inset_size')
+		in_margin = get_config_num('fig_inset_margin')
+		in_left, in_right, in_bottom, in_top = hsize - rmargin - in_margin - in_size, hsize - rmargin - in_margin, vsize - tmargin - in_margin - in_size, vsize - tmargin - in_margin
+
+	facecolor = rcParams['legend.facecolor']
+	if facecolor == 'inherit':
+		facecolor = rcParams['axes.facecolor']
+	ax_in = plt.axes(
+		[in_left / hsize, in_bottom / vsize, (in_right - in_left) / hsize, (in_top - in_bottom) / vsize],
+		facecolor=facecolor
+	)  # formerly with 'transform = fig.transFigure', which is probably unnecessary
+	ax_in.patch.set_alpha(rcParams['legend.framealpha'])
+	ax_in.get_xaxis().set_visible(False)
+	ax_in.get_yaxis().set_visible(False)
+	ax_in.get_xaxis().set_ticks([])
+	ax_in.get_yaxis().set_ticks([])
+	if rgbk:
+		v_offset = 0.5 * (margin - 0.15)
+		ax_in.axis([-0.5 - margin, 1.5 + margin, -0.5 - margin + v_offset - deltay, 1.5 + margin + v_offset - deltay])
+	else:
+		ax_in.axis([-margin, 1.0 + margin, -margin - deltay, 1.0 + margin - deltay])
+
+	# Plot colours, clip by a triangle
+	ec = rcParams['axes.edgecolor']
+	# NOTE: We do not use legend.edgecolor, because that is meant for the edges
+	# of the frame patch.
+	if rgbk:
+		img = ax_in.imshow(np.clip(rgb, 0, 1), origin = 'lower', interpolation = 'bilinear', extent = [-0.55, 1.55, -0.55 - deltay, 1.55 - deltay])
+		spoly = Polygon([[0.0, 0.0], [1.0, 0.0], [0.5, 0.5 * np.sqrt(3.0)]], closed = True, fc = 'none', ec = ec, joinstyle = 'bevel', transform = ax_in.transData)
+		lpoly = Polygon([[-0.5, 0.5 * np.sqrt(3.0)], [1.5, 0.5 * np.sqrt(3.0)], [0.5, -0.5 * np.sqrt(3.0)]], closed = True, fc = 'none', ec = ec, transform = ax_in.transData)
+		img.set_clip_path(lpoly)
+		ax_in.add_patch(spoly)
+		ax_in.add_patch(lpoly)
+	else:
+		img = ax_in.imshow(np.clip(rgb, 0, 1), origin = 'lower', interpolation = 'bilinear', extent = [-0.05, 1.05, -0.05 - deltay, 1.05 - deltay])
+		poly = Polygon([[0.0, 0.0], [1.0, 0.0], [0.5, 0.5 * np.sqrt(3.0)]], closed = True, fc = 'none', ec = ec, transform = ax_in.transData)
+		img.set_clip_path(poly)
+		ax_in.add_patch(poly)
+
+	# Orbital labels
+	lc = rcParams['legend.labelcolor']
+	if lc is None or lc == "None":
+		lc = rcParams['text.color']
+	if rgbk:
+		if labels[2] is not None:
+			ax_in.text(0.02, -0.02, labels[2], ha = 'right', va = 'top', fontsize = fontsize)
+		if labels[0] is not None:
+			ax_in.text(1.02, -0.02, labels[0], ha = 'left', va = 'top', fontsize = fontsize)
+		if labels[1] is not None:
+			ax_in.text(0.5, 0.5 * np.sqrt(3.0) + 0.01, labels[1], ha = 'center', va = 'bottom', fontsize = fontsize)
+		if title is not None:
+			ax_in.text(0.5, 0.98, title, ha = 'center', va = 'top', fontsize = fontsize, color = lc, transform = ax_in.transAxes)
+		if len(labels) >= 4 and labels[3] is not None:
+			# The 'neutral' label is repeated at all three outer vertices
+			ax_in.text(-0.5, 0.5 * np.sqrt(3.0) + 0.01, labels[3], ha = 'center', va = 'bottom', fontsize = fontsize)
+			ax_in.text(1.5, 0.5 * np.sqrt(3.0) + 0.01, labels[3], ha = 'center', va = 'bottom', fontsize = fontsize)
+			ax_in.text(0.54 + 0.005 * fontsize, -0.5 * np.sqrt(3.0) + 0.02, labels[3], ha = 'left', va = 'center', fontsize = fontsize)
+	else:
+		if labels[2] is not None:
+			ax_in.text(-0.08, -0.02, labels[2], ha = 'left', va = 'top', fontsize = fontsize)
+		if labels[0] is not None:
+			ax_in.text(1.08, -0.02, labels[0], ha = 'right', va = 'top', fontsize = fontsize)
+		if labels[1] is not None:
+			ax_in.text(0.5, 0.5 * np.sqrt(3.0) + 0.01, labels[1], ha = 'center', va = 'bottom', fontsize = fontsize)
+		if title is not None:
+			ax_in.text(0.5, 0.98, title, ha = 'center', va = 'top', fontsize = fontsize, color = lc, transform = ax_in.transAxes)
+
+	if filename is not None and filename != "":
+		plt.savefig(filename)
+		# Reset to original figure
+		plt.figure(fig.number)
+
+	# Reset current axis
+	plt.sca(axis)
+
+	return ax_in
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/toolstext.py b/kdotpy-v1.0.0/src/kdotpy/ploto/toolstext.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eaa9fc7c846fbff37ad02e55b43039e68e8ab4a
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/toolstext.py
@@ -0,0 +1,637 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import re
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib.patches import FancyBboxPatch
+
+from ..config import get_config, get_config_bool, get_config_num
+from ..momentum import Vector
+from ..observables import obsid_to_tex
+from ..phystext import format_value, format_vector_q, format_vector_unit
+
+
+### LATEX FORMATTING OF AXIS AND LEGEND LABELS ###
def reformat_degree(s):
	"""Replace a superscript \\circ (and any preceding TeX/plain spacing) by \\degree.

	Non-string input is returned unchanged.
	"""
	if not isinstance(s, str):
		return s
	# Optional TeX spacing command, whitespace, and empty group before ^\circ
	circ_pattern = r'(\\[,;: ])?\s*({})?\^{?\\circ}?'
	return re.sub(circ_pattern, r'\\degree', s)
+
def tex_length(string):
	"""Get a rough length estimate of the rendered TeX string.

	The string is first cut into TeX tokens (backslash commands, braced
	groups, whitespace, and single characters); the estimated widths of all
	tokens are then summed.

	Arguments:
	string   String. The TeX expression to be measured.

	Returns:
	l   Integer. Estimated length in 'character units'.
	"""
	# Stage 1: Cut string into tex tokens
	pos = 0
	tex_tokens = []
	while pos < len(string):
		if string[pos] == "\\":  # backslashed commands
			m = re.match(r"\\[a-zA-Z]+\s*", string[pos:])
			if m is not None:
				tex_tokens.append(m.group(0))
				pos += m.end(0)
			elif pos < len(string) - 1:
				# Backslash followed by a non-letter: a two-character token
				tex_tokens.append(string[pos:pos+2])
				pos += 2
			else:
				# Lone backslash at the very end of the string
				sys.stderr.write("Warning (tex_length): Parsing error. Missing symbol after \\.\n")
				tex_tokens.append("\\")
				pos += 1  # fix: was += 2; only one character remains
		elif string[pos] == '{':  # braced expression
			bracelevel = 1
			tex_tokens.append('{')
			for pos1 in range(pos + 1, len(string)):
				if string[pos1 - 1] == "\\":
					continue  # skip escaped braces
				if string[pos1] == '{':
					bracelevel += 1
				elif string[pos1] == '}':
					bracelevel -= 1
				if bracelevel == 0:
					tex_tokens[-1] = string[pos:pos1+1]
					break
			if tex_tokens[-1] == '{':
				sys.stderr.write("Warning (tex_length): Parsing error. Brace { without matching }.\n")
			pos += len(tex_tokens[-1])
		elif string[pos] == '}':  # closing brace not part of a braced expression
			sys.stderr.write("Warning (tex_length): Parsing error. Brace } without matching {.\n")
			tex_tokens.append('}')
			pos += 1
		elif string[pos] in [' ', '\t', '\n']:  # whitespace (replace any length by single space)
			m = re.match(r"\s*", string[pos:])
			tex_tokens.append(' ')
			pos += m.end(0)
		elif string[pos] == '%':  # comment (stop parsing immediately)
			break
		else:  # generic character
			tex_tokens.append(string[pos])
			pos += 1

	# Stage 2: Sum length of all tex tokens
	l = 0
	math = False
	for t in tex_tokens:
		# Backslash-command tokens carry trailing whitespace from the \s* in
		# the tokenizer regex; strip it so the comparisons below match.
		tc = t.rstrip() if t.startswith("\\") else t
		if tc.startswith("\\math") or tc.startswith("\\text") or tc in [r'\hat', r'\tilde', r'\bar', r'\!']:
			pass  # Non-characters: length 0
		elif tc in [r'\cdot', r'\dot', '+', '-', '/', '*']:
			l += 2  # Operators: length 2 (one character plus two half spaces)
		elif tc.startswith("\\"):
			l += 1  # Other backslashed items: length 1
		elif t.startswith("{") and t.endswith("}"):
			l += tex_length(t[1:-1])  # Braced expression: calculate length of inner expression
		elif t == '$':
			# fix: was 'math = ~math'; bitwise NOT on a bool is deprecated
			# (Python 3.12+) and only toggled truthiness by accident
			math = not math
		elif t in ['_', '^']:
			pass  # Non-characters: length 0
		elif t == ' ':
			l += 0 if math else 1  # Space: length 0 in math mode, else 1
		else:
			if len(t) > 1:
				sys.stderr.write("Warning (tex_length): Parsing error. Non-atomic TeX token '%s'\n" % t)
			l += 1  # Other: length 1
	return l
+
+def obs_latex(obsid):
+	"""LaTeX string for the built-in observables + units.
+	This function formats the observable and its unit into LaTeX format, which
+	can be used for axis and legend labels, for example. The 'real work' is done
+	in observables.obsid_to_tex(). This function provides some further
+	processing for compound or derived observables.
+
+	Arguments:
+	obsid   String
+
+	Returns:
+	qstr    String. TeX formatted string for physical quantity.
+	ustr    String. TeX formatted string for unit.
+	"""
+	if "." in obsid:
+		# Compound observable: format each part and join with commas
+		qstrs = []
+		ustrs = []
+		for this_obsid in obsid.split("."):
+			qstr, ustr = obsid_to_tex(this_obsid)
+			if qstr is None:
+				# Any invalid part invalidates the whole compound observable
+				return None, None
+			qstrs.append(qstr)
+			if ustr is not None and ustr != "":
+				ustrs.append(ustr)
+		qstr = ", ".join(qstrs)
+		ustr = ", ".join(ustrs)
+	elif obsid.startswith('abs') and len(obsid) > 3:
+		# Derived observable 'abs<obsid>': absolute value of <obsid>
+		qstr1, ustr = obsid_to_tex(obsid[3:])
+		if qstr1 is None:
+			return None, None
+		twosided = get_config_bool('fig_colorbar_abstwosided')
+		# Wrap the quantity in |...| inside the math delimiters, unless a
+		# two-sided colorbar is requested (then the sign is shown explicitly)
+		if qstr1 is not None and qstr1.startswith('$') and qstr1.endswith('$') and len(qstr1) > 2 and not twosided:
+			qstr = '$|' + qstr1[1:-1] + '|$'
+		else:
+			qstr = qstr1
+	else:
+		qstr, ustr = obsid_to_tex(obsid)
+	return qstr, ustr
+
+### AXIS (UNIT) FORMATTING ###
_fig_unit_format = None  # module-level cache: (left, right) unit delimiters

def get_fig_unit_format(reset_cache = False):
	"""Parse the 'fig_unit_format' configuration value into left/right delimiters.

	The parsed result is cached in the module variable _fig_unit_format, so the
	configuration is read only on the first call (or after a cache reset).

	Arguments:
	reset_cache   True or False. If True, discard the cached value and re-read
	              the configuration.

	Returns:
	A 2-tuple of strings (left, right), for example ('[', ']').
	"""
	global _fig_unit_format
	if reset_cache:
		# Fix: this argument was previously accepted but ignored, so the cache
		# could never be invalidated after a configuration change.
		_fig_unit_format = None
	if _fig_unit_format is None:
		template = get_config('fig_unit_format')
		if "%" in template:
			# '%' is the placeholder for the unit, e.g. '(%)' or '[%]'
			spl = template.split('%')
			_fig_unit_format = (spl[0], spl[-1])
		elif " " in template:
			# A space as placeholder, e.g. '( )'
			spl = template.split(' ')
			_fig_unit_format = (spl[0], spl[-1])
		elif len(template) == 2:
			# Two bare delimiter characters, e.g. '()'
			_fig_unit_format = (template[0], template[1])
		else:
			sys.stderr.write("Warning (get_fig_unit_format): Invalid unit format. Use one of: '(%)', '( )', or '()' (without the quotes).\n")
			_fig_unit_format = ('[', ']')  # default; see config.py
	return _fig_unit_format
+
+def format_axis_unit(unit):
+	"""Format axis unit.
+	Depending on the configuration value 'fig_unit_format', convert the raw unit
+	string (input argument) into a properly formatted string. This function
+	takes care of division slashes and/or exponents, for example."""
+	fmt_left, fmt_right = get_fig_unit_format()
+	# Merge adjacent '$' delimiters so the math-mode region stays contiguous:
+	# e.g. '$($' + '$x$' must not become '$($$x$'
+	if fmt_left.endswith('$') and unit.startswith('$'):
+		ustr = fmt_left[:-1] + unit[1:]
+	else:
+		ustr = fmt_left + unit
+	if ustr.endswith('$') and fmt_right.startswith('$'):
+		ustr = ustr[:-1] + fmt_right[1:]
+	else:
+		ustr = ustr + fmt_right
+	# replace degree symbol
+	ustr = reformat_degree(ustr)
+	return ustr
+
def format_axis_label(*arg):
	"""Concatenate strings and format the last argument as a unit.

	If one argument is given, return that argument unchanged. Otherwise format
	the last argument as a unit string using format_axis_unit() (unless it is
	None) and concatenate the result to the other string arguments, separated
	by spaces.

	Raises:
	ValueError if called without arguments.
	"""
	if not arg:
		raise ValueError("format_axis_label() expects at least 1 non-keyword argument")
	if len(arg) == 1:
		return arg[0]
	parts = list(arg[:-1])
	unit = arg[-1]
	if unit is not None:
		parts.append(format_axis_unit(unit))
	return ' '.join(parts)
+
def format_legend_label(str1, str2 = None):
	"""Format legend label from one or two strings.

	Arguments:
	str1, str2   One or two arguments, each being str or None. If str2 is None,
	             try to parse str1 as observable id. Otherwise, interpret str1
	             and str2 as strings for quantity and unit, respectively.

	Returns:
	label   String. TeX formatted string suitable as label for a legend (color
	        bar).

	Raises:
	TypeError   If either argument is neither str nor None.
	"""
	if not (isinstance(str1, str) or str1 is None) or not (isinstance(str2, str) or str2 is None):
		raise TypeError("Arguments must be string or None")
	if str2 is None:
		if str1 is None:
			return ""
		qstr, ustr = obs_latex(str1)
		if qstr is None:
			# Bug fix: the message previously referred to obs_id_tex(), but the
			# function actually called here is obs_latex().
			sys.stderr.write("ERROR (format_legend_label): obs_latex() got an invalid observable id. Perhaps a TeX string was used as a single argument for format_legend_label(). Use a single argument obsid or two arguments qstr, ustr.\n")
			return str1
	else:
		qstr, ustr = str1, str2

	if ustr is not None and len(ustr) > 0:
		ustr = format_axis_unit(ustr)
		ustr = reformat_degree(ustr)
	qlen = tex_length(qstr)
	ulen = tex_length(ustr)
	if ulen == 0:
		return qstr
	elif qlen + ulen >= 10:
		return qstr + '\n' + ustr  # long labels: put the unit on a second line
	else:
		return qstr + ' ' + ustr
+
def set_xlabel(*arg):
	"""Format and set x label.
	Applies format_axis_label(), sets it as xlabel and returns the string.

	Arguments:
	*arg   Strings (and optionally a final unit string or None), passed to
	       format_axis_label().

	Returns:
	The formatted label string.
	"""
	# Cleanup: removed the vestigial 'setit' flag, which was always True.
	lstr = format_axis_label(*arg)
	plt.xlabel(lstr)
	return lstr
+
def set_ylabel(*arg):
	"""Format and set y label.
	Applies format_axis_label(), sets it as ylabel and returns the string.

	Arguments:
	*arg   Strings (and optionally a final unit string or None), passed to
	       format_axis_label().

	Returns:
	The formatted label string.
	"""
	# Cleanup: removed the vestigial 'setit' flag, which was always True.
	lstr = format_axis_label(*arg)
	plt.ylabel(lstr)
	return lstr
+
def set_disp_axis_label(kname, set_x = False, set_y = False):
	"""Determine the label for the x axis (dispersion plots, etc.)
	This function takes the variable component, e.g., 'kx', and formats it into
	an axis label with units if appropriate. It can also apply the axis label
	immediately to the x and/or y axis in the current figure.

	Arguments:
	kname     String. The vector component, for example 'kx'.
	set_x     True or False. Whether to set the axis label as xlabel.
	set_y     True or False. Whether to set the axis label as ylabel.

	Returns
	TeX-formatted string for the axis label.
	"""
	qstr = format_vector_q(kname, style = 'tex')
	ustr = format_vector_unit(kname, style = 'tex')
	# Without a quantity string there is nothing to label.
	if qstr is None:
		return ""
	# format_axis_label() handles ustr = None gracefully.
	lstr = reformat_degree(format_axis_label(qstr, ustr))
	if set_x:
		plt.xlabel(lstr)
	if set_y:
		plt.ylabel(lstr)
	return lstr
+
### TEXT ELSEWHERE ###
def get_partext(pval, pname, accuracy = 1e-10):
	"""Determine the auxiliary label (placed in the upper left corner, usually)

	Arguments:
	pval      Numeric, Vector instance, or None. The parameter value. If None,
	          return the empty string.
	pname     String. The parameter/variable name. This may be a variable
	          component like 'kx'.
	accuracy  Positive float. If the parameter value is smaller in absolute
	          value, use the value 0.

	Returns:
	TeX-formatted parameter text
	"""
	float_fmt = "{:.3g}"
	if pname is None or pval is None:
		return ""
	if pname == "kdir" and isinstance(pval, (list, tuple)):
		# Components are joined by a thin space, unless any of them needs more
		# than one character (negative or two-digit values): then use commas.
		joiner = ',' if any(p < 0 or p >= 10 for p in pval) else '\\,'
		partext = "For $\\vec{k}$ along $[%s]$" % joiner.join(['%s' % p for p in pval])
	elif isinstance(pname, tuple) and isinstance(pval, tuple):
		if len(pname) != len(pval):
			raise ValueError("Input arguments pname and pval of tuple type must have equal length.")
		varstrs = [format_vector_q(n, style = 'tex').strip('$') for n in pname]
		valstrs = [format_value(0 if abs(v) < accuracy else v, style = 'tex', fmt = float_fmt).strip('$') for v in pval]
		ustrs = [format_vector_unit(n, style = 'tex').strip('$') for n in pname]
		varstr = ", ".join(varstrs)
		if all(u == ustrs[0] for u in ustrs):
			# All components share one unit: write it once, after the values.
			partext = "For $(%s)=(%s)\\ %s$" % (varstr, ", ".join(valstrs), ustrs[0])
		else:
			# Mixed units: attach the unit to each value separately.
			valstr = ", ".join(["%s\\ %s" % vu for vu in zip(valstrs, ustrs)])
			partext = "For $(%s)=(%s)$" % (varstr, valstr)
	elif isinstance(pname, str) and isinstance(pval, (float, np.floating, int, np.integer)):
		varstr = format_vector_q(pname, style = 'tex').strip('$')
		valstr = format_value(0 if abs(pval) < accuracy else pval, style = 'tex', fmt = float_fmt).strip('$')
		ustr = format_vector_unit(pname, style = 'tex').strip('$')
		partext = "For $%s=%s\\ %s$" % (varstr, valstr, ustr)
	else:
		raise TypeError("Invalid combination of arguments pname and pval")
	return reformat_degree(partext)
+
def add_char_labels(bandchar, axis = None, fig = None, k0 = None, xrange = None, yrange = None, size = None, box = True, transform = None):
	"""Add (band) character labels.
	Places character labels near the bands at k = k0. If multiple bands bunch up
	at the same energy, concatenate the corresponding labels with commas.

	Arguments:
	bandchar   A dict instance where the keys are the labels and the items the
	           energy values.
	axis       matplotlib axis instance in which the band labels should be
	           drawn; if None, use the current axis
	fig        matplotlib figure instance in which the band labels should be
	           drawn; if None, use the current figure
	k0         Vector, float, or None. Horizontal position at which the labels
	           are placed. If None, use 0.0.
	xrange     The extent of the horizontal axis. If None, determine
	           automatically.
	yrange     The extent of the vertical axis. If None, determine
	           automatically.
	size       Font size
	box        If True, draw a box around the labels.
	transform  ETransform instance. Transform the energy values to a different
	           vertical coordinate.

	Returns:
	matplotlib figure instance.
	"""
	if fig is None:
		fig = plt.gcf()
	else:
		fig = plt.figure(fig)  # make the given figure the current one
	if axis is None:
		axis = plt.gca()

	if xrange is None:  # x (typically momentum) range
		kmin, kmax = tuple(axis.get_xlim())
	else:
		kmin, kmax = tuple(xrange)
	# Normalize k0 (horizontal label position) to a float
	if k0 is None:
		k0 = 0.0
	elif isinstance(k0, (float, np.floating, int, np.integer)):
		k0 = float(k0)
	elif isinstance(k0, Vector):
		k0 = k0.len()
	else:
		raise TypeError("Argument k0 must be a Vector or float instance or None")
	# Nothing to do if the label position lies outside the plot range
	if k0 < kmin or k0 > kmax:
		return fig

	if yrange is None:  # y (typically energy) range
		emin, emax = tuple(axis.get_ylim())
	else:
		emin, emax = tuple(yrange)

	if bandchar is None or len(bandchar) == 0:
		return fig

	# structure: bandlabels = [energies, ids]
	# Collect labels of all bands whose (optionally transformed) energy is in range.
	bandlabels = []
	for b_t in bandchar:
		b_e = bandchar[b_t]
		if transform:
			b_e = transform.apply(b_e, at_x = 0.0 if transform.xval is not None else None)
		if emin < b_e < emax and b_t != '':
			bandlabels.append([b_e, b_t])
	bandlabels = sorted(bandlabels)

	# Get height of the axis in points (a standard unit in typography of approx.
	# 1/72 inch or 0.350 mm). Use a multiplier (default 0.8) to determine
	# minimum energy spacing between two subsequent labels.
	try:
		bbox = axis.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
		ax_height_pt = bbox.height * 72.0
	except:  # NOTE(review): bare except also swallows KeyboardInterrupt; consider 'except Exception'
		sys.stderr.write("Warning (add_char_labels): Could not determine figure size automatically.\n")
		ax_height_pt = 240  # some default value (corresponds to 85 mm approx.)
	space_mult = get_config_num('fig_charlabel_space', minval = 0.0)
	size0 = 12 if size is None else size
	# Minimum vertical distance between two labels, in data (energy) units
	d_e_labels = space_mult * (emax - emin) * (size0 / ax_height_pt)
	if box:
		d_e_labels *= 2.5  # boxed labels need more room
	d_k_labels = (kmax - kmin) * 0.01  # horizontal offset for edge placement

	# "reduce" overlapping labels
	# Repeat merge passes until the number of labels no longer decreases.
	n_labels = len(bandlabels) + 1
	while len(bandlabels) < n_labels:
		n_labels = len(bandlabels)
		bandlabels1 = []
		j = 0
		while j < len(bandlabels):
			if j < len(bandlabels) - 1 and abs(bandlabels[j+1][0] - bandlabels[j][0]) < d_e_labels and bandlabels[j+1][1][:-1] == bandlabels[j][1][:-1]:
				# Two nearby labels that differ only in their final +/- sign:
				# merge into a single label ending in a (minus-)plus-minus symbol,
				# oriented by which of the two lies higher in energy.
				bandlabels1.append([(bandlabels[j+1][0] + bandlabels[j][0]) / 2, bandlabels[j][1][:-1]])
				pm = 1 if bandlabels[j+1][1][-1] == '+' else -1
				if (bandlabels[j+1][0] - bandlabels[j][0]) * pm < 0:
					bandlabels1[-1][1] += '\u2213'  # "-+" minus-plus
				else:
					bandlabels1[-1][1] += '\u00B1'  # "+-" plus-minus
				j += 2
			elif j < len(bandlabels) - 1 and abs(bandlabels[j+1][0] - bandlabels[j][0]) < d_e_labels:
				# Two nearby but unrelated labels: join them with a comma.
				bandlabels1.append([(bandlabels[j+1][0] + bandlabels[j][0]) / 2, bandlabels[j][1] + ", " + bandlabels[j+1][1]])
				j += 2
			else:
				bandlabels1.append(bandlabels[j])
				j += 1
		bandlabels = bandlabels1

	# choose horizontal coordinate and horizontal alignment
	# Near the plot edges, shift the labels inward and align them accordingly.
	if k0 < kmin + 0.05 * (kmax - kmin):
		xpos, ha = kmin + 0.5 * d_k_labels, 'left'
	elif k0 < kmin + 0.1 * (kmax - kmin):
		xpos, ha = k0 + d_k_labels, 'left'
	elif k0 > kmax - 0.1 * (kmax - kmin):
		xpos, ha = k0 - d_k_labels, 'right'
	elif k0 > kmax - 0.05 * (kmax - kmin):
		xpos, ha = kmax - 0.5 * d_k_labels, 'right'
	else:
		xpos, ha = k0, 'center'
	# xpos = k0 + (d_k_labels if 0.0 - kmin < 0.1 * (kmax - kmin) else -d_k_labels if kmax - 0.0 < 0.1 * (kmax - kmin) else 0.0)  # choose horizontal coordinate
	# ha = 'left' if 0.0 - kmin < 0.1 * (kmax - kmin) else 'right' if kmax - 0.0 < 0.1 * (kmax - kmin) else 'center'  # choose horizontal alignment
	y_offset = -0.25 * d_e_labels  # shift labels slightly below the band energy
	boxprop = dict(boxstyle='round', facecolor = 'w', pad = 0.2, alpha = 0.5) if box else None
	for b in bandlabels:
		# Draw only labels that keep clear of the top and bottom plot edges;
		# '-' is replaced by the typographical minus sign U+2212.
		if emin + 0.5 * d_e_labels < b[0] + y_offset < emax - 0.5 * d_e_labels:
			txt = axis.text(xpos, b[0] + y_offset, b[1].replace('-', '\u2212'), ha = ha, va='center', fontsize = size, bbox = boxprop)  # , backgroundcolor=(1.0, 0.0, 0.0, 0.5))

	return fig
+
def add_band_labels(eival, bindex, llindex = None, axis = None, fig = None, k0 = None, xrange = None, yrange = None, size = None, box = True, transform = None):
	"""Add band labels, band index or (LL index, band index).
	Places band-index labels near the bands at k = k0. Labels that would
	overlap a previously placed label are omitted.

	Arguments:
	eival      Array. Eigenvalues, like ddp.eival of a DiagDataPoint instance.
	bindex     Array. Band indices, like ddp.bindex of a DiagDataPoint instance.
	llindex    Array. LL indices, like ddp.llindex of a DiagDataPoint instance.
	axis       matplotlib axis instance in which the band labels should be
	           drawn; if None, use the current axis
	fig        matplotlib figure instance in which the band labels should be
	           drawn; if None, use the current figure
	k0         Vector, float, or None. Horizontal position at which the labels
	           are placed. If None, use 0.0.
	xrange     The extent of the horizontal axis. If None, determine
	           automatically.
	yrange     The extent of the vertical axis. If None, determine
	           automatically.
	size       Font size
	box        If True, draw a box around the labels.
	transform  ETransform instance. Transform the energy values to a different
	           vertical coordinate.

	Returns:
	matplotlib figure instance.
	"""
	if fig is None:
		fig = plt.gcf()
	else:
		fig = plt.figure(fig)  # make the given figure the current one
	if axis is None:
		axis = plt.gca()

	if xrange is None:  # x (typically momentum) range
		kmin, kmax = tuple(axis.get_xlim())
	else:
		kmin, kmax = tuple(xrange)
	# Normalize k0 (horizontal label position) to a float
	if k0 is None:
		k0 = 0.0
	elif isinstance(k0, (float, np.floating, int, np.integer)):
		k0 = float(k0)
	elif isinstance(k0, Vector):
		k0 = k0.len()
	else:
		raise TypeError("Argument k0 must be a Vector or float instance or None")
	# Nothing to do if the label position lies outside the plot range
	if k0 < kmin or k0 > kmax:
		return fig

	if yrange is None:  # y (typically energy) range
		emin, emax = tuple(axis.get_ylim())
	else:
		emin, emax = tuple(yrange)

	if bindex is None:
		return fig

	# structure: bandlabels = [energies, ids]
	# Transform the eigenvalues to the plot's vertical coordinate if requested.
	if transform:
		if transform.xval is None:
			eival1 = transform.apply(eival)
		else:
			eival1 = transform.apply([eival], at_x = 0.0)
	else:
		eival1 = eival
	# Label text is the band index, or '(LL index, band index)' if LL indices
	# are given; only in-range energies are kept.
	if llindex is None:
		bandlabels = [[e, "%i" % b] for e, b in zip(eival1, bindex) if emin < e < emax]
	else:
		bandlabels = [[e, "(%i, %i)" % (ll, b)] for e, b, ll in zip(eival1, bindex, llindex) if emin < e < emax]
	bandlabels = sorted(bandlabels)
	if len(bandlabels) == 0:
		return fig

	# Minimum vertical distance between two labels, in data (energy) units
	d_e_labels = (emax - emin) * 0.03 if size is None else (emax - emin) * 0.0025 * size
	if box:
		d_e_labels *= 2.5  # boxed labels need more room
	d_k_labels = (kmax - kmin) * 0.01  # horizontal offset for edge placement

	# "reduce" overlapping labels
	# Single upward pass: keep a label only if it lies more than d_e_labels
	# above the previously kept one (unlike add_char_labels, no merging).
	bandlabels1 = [bandlabels[0]]
	for lb in bandlabels:
		if (lb[0] - bandlabels1[-1][0]) > d_e_labels:
			bandlabels1.append(lb)
	bandlabels = bandlabels1

	# choose horizontal coordinate and horizontal alignment
	# Near the plot edges, shift the labels inward and align them accordingly.
	if k0 < kmin + 0.05 * (kmax - kmin):
		xpos, ha = kmin + 0.5 * d_k_labels, 'left'
	elif k0 < kmin + 0.1 * (kmax - kmin):
		xpos, ha = k0 + d_k_labels, 'left'
	elif k0 > kmax - 0.1 * (kmax - kmin):
		xpos, ha = k0 - d_k_labels, 'right'
	elif k0 > kmax - 0.05 * (kmax - kmin):
		xpos, ha = kmax - 0.5 * d_k_labels, 'right'
	else:
		xpos, ha = k0, 'center'
	# xpos = k0 + (d_k_labels if 0.0 - kmin < 0.1 * (kmax - kmin) else -d_k_labels if kmax - 0.0 < 0.1 * (kmax - kmin) else 0.0) # choose horizontal coordinate
	# ha = 'left' if 0.0 - kmin < 0.1 * (kmax - kmin) else 'right' if kmax - 0.0 < 0.1 * (kmax - kmin) else 'center' # choose horizontal alignment
	y_offset = -0.25 * d_e_labels  # shift labels slightly below the band energy
	boxprop = dict(boxstyle='round', facecolor = 'w', pad = 0.2, alpha = 0.5) if box else None
	for b in bandlabels:
		# Draw only labels that keep clear of the top and bottom plot edges;
		# '-' is replaced by the typographical minus sign U+2212.
		if b[0] + y_offset > emin + 0.5 * d_e_labels and b[0] + y_offset < emax - 0.5 * d_e_labels:
			txt = axis.text(xpos, b[0] + y_offset, b[1].replace('-', '\u2212'), ha = ha, va='center', fontsize = size, bbox = boxprop)  # , backgroundcolor=(1.0, 0.0, 0.0, 0.5))

	return fig
+
def set_band_label_2d(label, axis = None):
	"""Band label for 2D band plots.
	Write boxed text in the corner of a 2D dispersion plot.

	Arguments:
	label  Label text
	axis   matplotlib axis instance in which the label should be drawn; if None,
	       use the current axis

	No return value.
	"""
	if axis is None:
		axis = plt.gca()
	fig = plt.gcf()
	if label.startswith('G'):
		# Gamma-point labels like 'G8H+' are typeset in TeX math mode.
		txt = '$' + re.sub(r"G([678])([LH]?)([-+]?)", r'\\Gamma_{\1\2}\3', label) + '$'
		width = max(0.015 * len(label), 0.035)
	else:
		txt = label.replace('-', '\u2212')  # typographical minus sign
		width = max(0.018 * len(label) - 0.008, 0.035)
	# Draw a rounded white box in figure coordinates, then the text on top.
	axis.add_patch(FancyBboxPatch((0.15, 0.93), width, 0.025, boxstyle="round,pad=0.01", fc="white", ec="k", transform = fig.transFigure))
	axis.text(0.15, 0.93, txt + " ", ha = 'left', va = 'baseline', transform = fig.transFigure)
+
# Position of plot title
def get_title_position(where, default = (0.5, 0.98, 'center', 'top')):
	"""Get position of the plot title.
	Arguments:
	where     Label specifying the position. Inspect the code below for
	          permitted values.
	default   If where is None, 'auto' or 'automatic', return these values.

	Returns:
	x, y      Coordinates
	ha, va    Horizontal and vertical text alignment
	"""
	# return format x, y, ha, va
	if where is None or where == 'auto' or where == 'automatic':
		return default
	elif where in ['t', 'top', 'n', 'north', 'topcenter', 'center']:
		return 0.5, 0.98, 'center', 'top'
	elif where in ['b', 'bottom', 's', 'south', 'bottomcenter']:
		return 0.5, 0.02, 'center', 'bottom'
	elif where in ['tl', 'topleft', 'l', 'left', 'nw', 'northwest']:
		return 0.03, 0.98, 'left', 'top'
	elif where in ['bl', 'bottomleft', 'sw', 'southwest']:
		return 0.03, 0.02, 'left', 'bottom'
	elif where in ['tr', 'topright', 'r', 'right', 'ne', 'northeast']:
		return 0.97, 0.98, 'right', 'top'
	elif where in ['br', 'bottomright', 'se', 'southeast']:
		# Bug fix: this branch previously repeated 'sw'/'southwest' (already
		# handled by the bottom-left branch), so 'se'/'southeast' fell through
		# to the error branch.
		return 0.97, 0.02, 'right', 'bottom'
	else:  # if where in ['e', 'w', 'east', 'west']:
		# Bug fix: the format string previously lacked the '% where' argument,
		# so the message printed a literal '%s'.
		sys.stderr.write("ERROR (get_title_position): %s is not a valid plot title position.\n" % where)
		return default
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/toolsticks.py b/kdotpy-v1.0.0/src/kdotpy/ploto/toolsticks.py
new file mode 100644
index 0000000000000000000000000000000000000000..eec79e5055d4e0b31335e02733f8f2062e86b742
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/toolsticks.py
@@ -0,0 +1,265 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+import matplotlib.ticker as mpltick
+
+from ..config import get_config
+from ..physconst import hbar
+
+
### TICKS HANDLING ###
def get_tick_setting(config_key_major='fig_ticks_major', config_key_minor='fig_ticks_minor'):
	"""Get ticks setting from configuration.

	Arguments:
	config_key_major   String. Configuration key for the major tick setting.
	config_key_minor   String. Configuration key for the minor tick setting.

	Returns:
	major, minor   Two lists of two strings [x_setting, y_setting], lower case
	               with surrounding whitespace removed. A configuration value
	               without a comma applies to both x and y.
	"""
	def parse(setting):
		# Split at the comma into (x, y); extra fields are ignored. Note that
		# str.strip() already removes leading whitespace, so the lstrip() of
		# the original implementation was redundant.
		values = [x.lower().strip() for x in setting.split(',')[0:2]]
		return values if len(values) == 2 else [values[0], values[0]]
	return parse(get_config(config_key_major)), parse(get_config(config_key_minor))
+
def get_tick_step(loc, integer = True):
	"""Determine tick step.
	This function extracts the tick step from a matplotlib locator instance. In
	the default setting (integer = True), this function returns the smallest
	integer j such that the distance between two ticks equals j * 10^n for some
	n. For example, a distance of 0.01 yields 1; 0.05 yields 5; 0.025 yields 25.

	Arguments:
	loc      matplotlib locator instance
	integer  If True, multiply by factors of ten in order to get integer values.

	Returns:
	step     Integer (if integer is True) or float; None if the tick step could
	         not be determined.
	"""
	try:
		ticks = loc()
		dx = ticks[1] - ticks[0]
	except Exception:
		# Bug fix: a bare 'except:' would also swallow KeyboardInterrupt and
		# SystemExit; catching Exception preserves the intended fallback.
		return None
	# Scale the tick distance by a power of ten into the interval [1, 10).
	step = dx * 10**-np.floor(np.log10(dx))
	if integer:
		# Multiply by 10 until step is (nearly) an integer; the 1e5 bound is a
		# safeguard against non-terminating decimals.
		while np.abs(np.round(step) - step) >= 1e-3 and step <= 1e5:
			step *= 10
		step = int(step)
	return step
+
def set_ticks(axis = None, xdegrees = False):
	"""Set plot ticks.

	Arguments:
	axis      The axis for which the ticks should be determined. If None, use
	          the current axis.
	xdegrees  Set to True in order to indicate that the values are in degrees.
	          This prefers a different set of ticks steps, such as 15, 30. 45,
	          90, etc., rather than the usual 1, 2, 5, etc.

	No return value.
	"""
	if axis is None:
		axis = plt.gca()

	# Configured (x, y) settings for major and minor ticks
	major, minor = get_tick_setting()

	# Iterate over x and y axis
	axis_xy = [axis.get_xaxis(), axis.get_yaxis()]
	if xdegrees:
		xmin, xmax = axis.get_xlim()
		xrng = xmax - xmin
		# Degree-friendly base step, chosen from the x range
		degstep = 360 if xrng >= 1080 else 180 if xrng >= 480 else 90 if xrng >= 240 else 30 if xrng >= 120 else 15 if xrng >= 75 else 5 if xrng >= 20 else 1
	for j in [0, 1]:  # j = 0: x axis; j = 1: y axis
		# Major ticks
		if major[j] == 'none':
			maj_loc = mpltick.NullLocator()
		elif j == 0 and xdegrees and xrng > 20:
			# Degree mode (x axis only): scale the base step up or down.
			# Note that 'more' may turn degstep into a float.
			if major[j] == 'fewer':
				degstep *= 2
			elif major[j] == 'more':
				degstep /= 2 if degstep % 2 == 0 else 1.5 if degstep % 3 == 0 else 2.5 if degstep % 5 == 0 else 1
			maj_loc = mpltick.MultipleLocator(degstep)
		elif major[j] in ['fewer', 'normal', 'more']:
			nbins = 3 if major[j] == 'fewer' else 12 if major[j] == 'more' else 6
			maj_loc = mpltick.MaxNLocator(nbins=nbins, steps=[1, 2, 2.5, 4, 5, 10])
		elif major[j] == 'auto':
			maj_loc = mpltick.AutoLocator()
		else:
			sys.stderr.write("Warning (set_ticks): Invalid ticks indicator '%s' (%s major)\n" % (major[j], 'xy'[j]))
			maj_loc = mpltick.AutoLocator()
		axis_xy[j].set_major_locator(maj_loc)
		# Integer tick step of the major locator; None on failure
		step = get_tick_step(maj_loc)

		# Minor ticks
		# The number of minor subdivisions is chosen to divide the major step
		# evenly (e.g., steps of 3 are subdivided in 3, steps of 2 or 4 in 4).
		if minor[j] == 'none':
			min_loc = mpltick.NullLocator()
		elif step is None and minor[j] in ['fewer', 'normal', 'more']:
			sys.stderr.write("Warning (set_ticks): Failed to set ticks (%s minor)\n" % 'xy'[j])
			min_loc = mpltick.NullLocator()
		elif minor[j] in ['fewer', 'normal', 'more']:
			if minor[j] == 'fewer':
				minor_subdiv = 3 if step in [3, 6, 15, 45] else 2
			elif minor[j] == 'more':
				minor_subdiv = 15 if step in [3, 6, 15] else 9 if step in [9, 18, 36, 45, 72] else 10
			else:
				minor_subdiv = 4 if step in [2, 4, 36, 72] else 3 if step in [3, 6, 9, 15, 18, 45] else 5
			min_loc = mpltick.AutoMinorLocator(n = minor_subdiv)
		elif minor[j] == 'auto':
			min_loc = mpltick.AutoMinorLocator(n = None)
		else:
			sys.stderr.write("Warning (set_ticks): Invalid ticks indicator '%s' (%s minor)\n" % (minor[j], 'xy'[j]))
			min_loc = mpltick.NullLocator()
		axis_xy[j].set_minor_locator(min_loc)
+
def set_polar_ticks(rval, thetaval, axis = None):
	"""Set polar ticks.
	Choose the appropriate values depending on the input values and the
	configuration settings.

	Arguments:
	rval      Extent of the radial values. This may be an array of all radial
	          values; only the maximum is relevant.
	thetaval  Extent of the angular values. This may be an array of all angles;
	          only the minimum and maximum are relevant.
	axis      matplotlib axis instance in which the ticks should be drawn.

	No return value.
	"""
	if axis is None:
		axis = plt.gca()

	major, minor = get_tick_setting()

	rmax = rval.max()
	# thetamin = np.degrees(thetaval.min())
	# Snap the angular extent to a quarter, half, or full circle.
	thetamax = np.degrees(thetaval.max())
	thetamax = 90.0 if thetamax <= 90.01 else 180.0 if thetamax <= 180.01 else 360.0

	# Scale small radial ranges (rmax < 0.1) up by a power of ten, so that the
	# tick values can be chosen from the fixed sets below.
	rmax_div = 10**-np.floor(np.log10(rmax * 10)) if rmax < 0.1 else 1
	rmaxs = rmax * rmax_div  # scaled values (by powers of 10)
	if major[0] in ['auto', 'normal']:
		# NOTE(review): the last alternative uses rmax, unlike the 'fewer' and
		# 'more' branches which use the scaled value rmaxs — verify intent.
		rticks = [0.05, 0.1, 0.15] if rmaxs <= 0.15 else [0.1, 0.2, 0.3] if rmaxs <= 0.3 else [0.2, 0.4, 0.6, 0.8] if rmaxs <= 0.9 else np.arange(0, rmax, 0.5)[1:]
	elif major[0] == 'none':
		rticks = []
	elif major[0] == 'fewer':
		rticks = [0.1, ] if rmaxs <= 0.15 else [0.2, ] if rmaxs <= 0.3 else [0.5, ] if rmaxs <= 0.9 else np.arange(0, rmaxs, 0.5)[1:] if rmaxs <= 1.8 else np.arange(0, rmaxs, 1.0)[1:]
	elif major[0] == 'more':
		rticks = [0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14] if rmaxs <= 0.15 else [0.05, 0.1, 0.15, 0.2, 0.25] if rmaxs <= 0.3 else np.arange(0, rmaxs, 0.1)[1:] if rmaxs <= 0.9 else np.arange(0, rmaxs, 0.2)[1:]
	else:
		sys.stderr.write("Warning (set_polar_ticks): Invalid ticks indicator '%s' (r major)\n" % (major[0]))
		rticks = []
	if len(rticks) >= 1:
		# Undo the power-of-ten scaling when applying the radial grid.
		axis.set_rgrids(np.array(rticks) / rmax_div, angle = 90.0, va = 'center', ha = 'right')
	axis.set_rmax(rmax)

	# TODO: r minor

	# Angular tick step: 15 degrees by default, 30 for 'fewer', 5 for 'more'
	if major[1] == 'none':
		thetaticks = []
	elif major[1] in ['normal', 'auto']:
		thetaticks = np.radians(np.arange(0, thetamax + 1.0, 15.0))
	elif major[1] == 'fewer':
		thetaticks = np.radians(np.arange(0, thetamax + 1.0, 30.0))
	elif major[1] == 'more':
		thetaticks = np.radians(np.arange(0, thetamax + 1.0, 5.0))
	else:
		# Bug fix: the warning previously printed major[0] (the radial setting)
		# rather than the offending angular setting major[1].
		sys.stderr.write("Warning (set_polar_ticks): Invalid ticks indicator '%s' (angle major)\n" % (major[1]))
		thetaticks = []

	# Draw short tick marks at the outer edge for each angular tick.
	for theta in thetaticks:
		axis.plot([theta, theta], [rmax, rmax * 0.98], 'k-', linewidth = 0.5)

	# TODO: theta minor

	axis.grid(visible = True, linewidth=0.5, color='#e0e0e0', zorder=5)
+
### SPECIAL TICKS ###
def add_frequency_ticks(emax = None, axis = None, fdiv = None, ffmt = None, color = 'b', xmin = None, xmax = None):
	"""Add frequency ticks (THz values) at the inner edge of the left-hand (energy) axis

	Arguments:
	emax    Maximum value of the energy axis
	axis    matplotlib axis in which the ticks should be drawn; if None, use the
	        current axis.
	fdiv    Frequency division (ticks step); if None, determine automatically
	ffmt    Format (as used in 'ffmt % value'); if None, determine automatically
	color   The colour.
	xmin, xmax   The extent of the x axis; if None, determine automatically

	No return value.
	"""
	if axis is None:
		axis = plt.gca()
	if emax is None:
		_, emax = axis.get_ylim()
	# Energy-to-frequency conversion E = 2e3 * pi * hbar * f; the factor 2.0e3
	# presumably encodes the unit conversion — confirm against physconst.hbar.
	fmax = emax / (2.0e3 * np.pi * hbar)
	if fdiv is None:
		# Choose a tick step (and matching number format) for the range.
		if fmax >= 20.0:
			fdiv, ffmt = 5.0, "%i"
		elif fmax >= 10.0:
			fdiv, ffmt = 2.0, "%i"
		elif fmax >= 3.0:
			fdiv, ffmt = 1.0, "%i"
		elif fmax >= 1.5:
			fdiv, ffmt = 0.5, "%.1f"
		elif fmax >= 0.6:
			fdiv, ffmt = 0.2, "%.1f"
		else:
			fdiv, ffmt = 0.1, "%.1f"
	if ffmt is None:
		ffmt = "%i" if fdiv >= 1.0 else "%.1f" if fdiv >= 0.1 else "%.2f"
	if xmin is None or xmax is None:
		xmin, xmax = axis.get_xlim()

	for f_thz in np.arange(fdiv, fmax, fdiv):
		e_thz = f_thz * 2.0e3 * np.pi * hbar
		# Bug fix: draw the tick marks in the given axis (axis.plot) rather
		# than the current axis (plt.plot), consistent with axis.text below.
		axis.plot([xmin, xmin + 0.01 * (xmax - xmin)], [e_thz, e_thz], color = color)
		axis.plot([xmax, xmax - 0.01 * (xmax - xmin)], [e_thz, e_thz], color = color)
		if e_thz > 0.98 * emax:
			pass  # too close to the top edge: tick mark only, no label
		elif (f_thz + fdiv) * 2.0e3 * np.pi * hbar > 0.98 * emax:
			# Topmost labelled tick: append the unit 'THz'.
			axis.text(xmin + 0.015 * (xmax - xmin), e_thz, (ffmt + " THz") % f_thz, ha = "left", va = 'center', color = color)
		else:
			axis.text(xmin + 0.015 * (xmax - xmin), e_thz, ffmt % f_thz, ha = "left", va = 'center', color = color)
	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/ploto/wf.py b/kdotpy-v1.0.0/src/kdotpy/ploto/wf.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b79f979a653aa679a33241562019a9a8803ffa9
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/ploto/wf.py
@@ -0,0 +1,1246 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import os
+
+from matplotlib import use as mpluse
+mpluse('pdf')
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_pdf import PdfPages
+import matplotlib.colors as mplcolors
+import matplotlib.text
+import matplotlib.figure
+import subprocess as subp
+
+from .colortools import hsl_to_rgb, rgb_to_hsl, try_colormap
+from .tools import get_fignum, get_plot_size
+from .toolstext import set_xlabel, set_ylabel, get_partext
+
+from ..physconst import eoverhbar
+from ..phystext import orbital_labels
+from ..observables import blockdiag
+from ..momentum import Vector
+from ..config import get_config, get_config_bool, get_config_num
+
+### TOOLS ###
def delete_intermediate_files(files):
	"""Delete intermediate files.

	Argument:
	files   List of files to be deleted.

	Returns:
	True if all files could be deleted, otherwise False.
	"""
	if len(files) == 0:
		return True
	n_failed = 0
	for fl in files:
		try:
			os.remove(fl)
		except OSError:
			# Count failures (missing file, permissions, ...). Catch OSError
			# only, so that e.g. KeyboardInterrupt is not silently swallowed.
			n_failed += 1
	if n_failed == 0:
		return True
	elif n_failed == len(files):
		sys.stderr.write("ERROR (delete_intermediate_files): None of the files could be deleted.\n")
	else:
		sys.stderr.write("Warning (delete_intermediate_files): Deletion of %i file%s has failed.\n" % (n_failed, "" if n_failed == 1 else "s"))
	return False
+
def get_bandlabel(diagdatapoint, bandlabels, j):
	"""
	Get band label string for state j in a DiagDataPoint instance.

	Arguments:
	diagdatapoint  DiagDataPoint instance.
	bandlabels     Passed from other plot functions that determines what type
	               of label should be shown. If None, determine automatically.
	               If a string, use one label for all states. If a list or array
	               of strings, use different labels for the states. If a tuple
	               of the form (string, list of strings), apply first element as
	               a formatter for the strings in the list.
	j              Integer. The index of the state in the DiagDataPoint.

	Returns:
	bandlabel      String, possibly empty.
	"""
	label = None
	if bandlabels is None:
		# Automatic choice: prefer a valid band character, then the band
		# index, finally fall back to the state index itself.
		char = diagdatapoint.char
		if char is not None and '?' not in char[j]:
			label = char[j]
		elif diagdatapoint.bindex is not None:
			label = diagdatapoint.bindex[j]
		else:
			label = "%i" % j
	elif isinstance(bandlabels, str):
		label = bandlabels
	elif isinstance(bandlabels, (list, np.ndarray)):
		label = bandlabels[j]
	elif isinstance(bandlabels, tuple) and len(bandlabels) == 2:
		fmt, values = bandlabels
		if isinstance(fmt, str) and isinstance(values, (list, np.ndarray)):
			label = fmt % values[j]
	if label is None:
		# Anything else is an invalid specification; warn and use empty label.
		sys.stderr.write("Warning (ploto.get_bandlabel): Band label argument invalid.\n")
		label = ""
	# Typeset hyphens as proper unicode minus signs
	return str(label).replace('-', '\u2212')
+
def display_parameter_text(paramvalue, var = None, ax = None, text_y = 0.97):
	"""
	Display parameter text in the upper left corner in the form $param=value$.

	Arguments:
	paramvalue  None, dict, Vector instance, or numerical value. If a Vector or
	            numerical value, show the value. If a dict, show '$key=value$'
	            on subsequent lines. If None, do not show.
	var         None or string. If paramvalue is a Vector instance, use this
	            string as the variable name. If None, use 'k'. This argument is
	            ignored if paramvalue is a dict instance.
	ax          Matplotlib Axes object or None. If None, use the current Axes.
	text_y      Float. Vertical coordinate of the text.

	Returns:
	text_y      Float. Vertical coordinate of the next line of text. It is
	            decreased by a fixed value for every line of text.
	"""
	if ax is None:
		ax = plt.gca()
	if var is None:
		var = 'k'
	if isinstance(paramvalue, dict) and len(paramvalue) > 0:
		# NOTE: The loop variable deliberately shadows the argument 'var'; in
		# the dict case, the dict keys serve as variable names.
		for var in paramvalue:
			if isinstance(paramvalue[var], Vector):
				# Lowercase single-letter momentum/field variables ('K' -> 'k', 'B' -> 'b')
				var1 = var.lower() if isinstance(var, str) and var.lower() in ['k', 'b'] else var
				pname, pval = paramvalue[var].get_pname_pval(prefix = var1)
				parstr = get_partext(pval, pname).replace('For ', 'At ')
			elif isinstance(paramvalue[var], (int, np.integer, float, np.floating)):
				parstr = "At $%s=%g$" % (str(var), paramvalue[var])
			else:
				# Fallback: plain string representation of the value
				parstr = "At $%s=%s$" % (str(var), str(paramvalue[var]))
			ax.text(0.02, text_y, parstr, ha='left', va='top', transform=ax.transAxes)
			text_y -= 0.07  # advance downwards by one text line
	elif isinstance(paramvalue, Vector):
		var1 = var.lower() if isinstance(var, str) and var.lower() in ['k', 'b'] else var
		pname, pval = paramvalue.get_pname_pval(prefix = var1)
		parstr = get_partext(pval, pname).replace('For ', 'At ')
		ax.text(0.02, text_y, parstr, ha='left', va='top', transform=ax.transAxes)
		text_y -= 0.07
	elif isinstance(paramvalue, (int, np.integer, float, np.floating)):
		ax.text(0.02, text_y, "At $%s=%g$" % (str(var), paramvalue), ha='left', va='top', transform=ax.transAxes)
		text_y -= 0.07
	# For any other paramvalue (including None), draw nothing and return
	# text_y unchanged.
	return text_y
+
def reorder_legend(handles, labels, order = None):
	"""Reorder legend handles and labels, and possibly insert empty spaces

	Arguments:
	handles   List of legend handles
	labels    List of legend labels (str instances)
	order     None or list of integers and None. Integer entries select
	          handles/labels by index; None entries insert an empty (invisible)
	          legend entry. If the argument is None, take from configuration
	          option.

	Returns:
	handles_ordered   Reordered list of legend handles
	labels_ordered    Reordered list of legend labels
	"""
	if order is None:
		orb_order = get_config('plot_wf_orbitals_order', ['standard', 'paired', 'table'])
		if orb_order == 'standard':  # standard order
			order = [0, 1, 2, 3, 4, 5, 6, 7]
		elif orb_order == 'paired':  # paired Gamma6,±1/2 Gamma8,±1/2
			order = [0, 3, 2, 1, 4, 5, 6, 7]
		elif orb_order == 'table':  # orbitals vertically, Jz horizontally ordered
			order = [None, 0, 1, None, 2, 3, 4, 5, None, 6, 7]
		else:
			raise ValueError("Invalid value for configuration value 'plot_wf_orbitals_order'.")
	handles_ordered = []
	labels_ordered = []
	for o in order:
		if o is None:
			# Invisible placeholder entry. Note np.nan: the np.NaN alias was
			# removed in NumPy 2.0.
			emptyplot, = plt.plot(np.nan, np.nan, '-', color='none')
			handles_ordered.append(emptyplot)
			labels_ordered.append("")  # TODO: Fix alignment
		elif not isinstance(o, int):
			raise TypeError("Argument order must be a list containing integers or None.")
		elif o >= 0 and o < len(handles) and o < len(labels):
			handles_ordered.append(handles[o])
			labels_ordered.append(labels[o])
		# else: silently skip out-of-range indices
	# TODO: Empty elements at the end need to be deleted
	return handles_ordered, labels_ordered
+
+### PLOT FUNCTIONS ###
def wavefunction_z(
		params, diagdatapoint, filename = "", absval = False, title = None,
		eivalrange = None, num = None, bandlabels = None, display_k = None,
		basis = None, basislabels = None, phase_rotate = True, ll_full = False,
		remember = False):
	"""Plot wave functions as function of z.
	Separate by orbital and real/imaginary value.

	Arguments:
	params        PhysParams instance
	diagdatapoint DiagDataPoint instance. For eigenvalues, eigenvectors, and
	              labels.
	filename      If None or empty string, then save to separate files.
	              Otherwise, save a multipage PDF with the given filename.
	absval        NOT USED
	title         NOT USED
	eivalrange    None or a 2-tuple. If set, do not plot wave functions for the
	              states whose eigenvalues lie outside this range.
	num           IGNORED
	bandlabels    Labels that will be drawn on the plots. If None, determine
	              automatically. If a string, use one label for all states. If a
	              list or array of strings, use different labels for the states.
	              If a tuple of the form (string, list of strings), apply first
	              element as a formatter for the strings in the list.
	display_k     None, dict or a Vector instance. If a Vector, show the value.
	              If a dict, show '$key=value$' joined with commas. If None, do
	              not show.
	basis         Numpy array or matrix, shape (norb, norb), where norb is the
	              number of orbitals. Expand the wave functions in this basis
	              rather than the standard basis of orbitals. The matrix should
	              contain the basis vectors as row vectors.
	basislabels   List of strings. The expressions for the basis states. This
	              may also be used for the standard basis, i.e., if argument
	              basis is None.
	phase_rotate  True, False, or 'k'. If True (default), multiply each
	              eigenvector by a phase factor such that the value psi_i of
	              largest magnitude is purely real with Re psi_i > 0. In case
	              the phases are already set with DiagDataPoint.set_eivec_phase(),
	              it is recommended to use False, so that the phase choice is
	              not overwritten. If the value is 'k', then rotate according to
	              the in-plane angle of the momentum.
	ll_full       True or False. Set to True for full LL mode, else False. The
	              effect is that the 'y' value at which the section is taken
	              (in full LL mode, this is the LL index) is where the integral
	              $\\int |\\psi(z, y)|^2 dz$ is maximal. In other cases, the
	              section is taken at y = 0 (at the center).
	remember      True or False. If False (default), close each figure with
	              plt.close(). If True, do not close the figures, so that they
	              can be modified in the future. The figures are saved
	              regardless.

	Note:
	The arguments labelled as ignored, are included only to make the argument
	lists between wavefunction_z() and abs_wavefunctions_z() identical.

	Returns:
	fig   List of figure numbers when successful. None if an error occurs, if
	      there is no data, or Figure objects have been closed (if argument
	      remember is False).
	"""
	eival = diagdatapoint.eival
	eivecs = diagdatapoint.eivec
	if eivecs is None:
		sys.stderr.write("ERROR (ploto.wavefunction_z): Eigenvector data is missing.\n")
		return None
	nz = params.nz
	ny = params.ny
	dz = params.zres
	norb = params.norbitals
	suppress_character_warning = (diagdatapoint.k != 0)
	phase_rotate_warning_given = False

	# Bring eigenvectors into (state, component) order; the number of
	# components determines whether this is 1D (norb * ny * nz) or 2D
	# (norb * nz) data.
	if eivecs.shape[0] == norb * ny * nz:		# for 1D
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * ny * nz:	    # for 1D, inverted order
		# neig = eivecs.shape[0]
		pass
	elif eivecs.shape[0] == norb * nz:			# for 2D
		ny = 1
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * nz:			# for 2D, inverted order
		ny = 1
		# neig = eivecs.shape[0]
	else:
		sys.stderr.write("ERROR (ploto.wavefunction_z): Eigenvectors have incorrect number of components\n")
		exit(1)

	colors = ['r', 'c', 'b', 'g', 'm', 'y', '#3fdf3f', '#ff7fff']
	ls_p, ls_m = '-', (0, (1.3, 1.0))
	orb_ls = [ls_p, ls_m, ls_p, ls_p, ls_m, ls_m, ls_p, ls_m]
	if basislabels is None:
		orblabels = orbital_labels(style = 'tex')
	elif isinstance(basislabels, list):
		if len(basislabels) < norb:
			raise ValueError("Argument basislabels must have at least norb (%i) entries." % norb)
		orblabels = basislabels
	else:
		raise TypeError("Argument basislabels must be None or a list of strings.")

	if isinstance(basis, np.ndarray):
		basis_mat = basis.conjugate()
		if min(basis_mat.shape) < norb:
			raise ValueError("Argument basis is a matrix of insufficient size")
		elif max(basis_mat.shape) > norb:
			sys.stderr.write("Warning (ploto.wavefunction_z): Matrix for argument basis is too large. Superfluous entries are discarded.\n")
			basis_mat = basis_mat[:norb, :norb]
		basis_mat = blockdiag(basis_mat, nz).tocsc()  # expand over the z coordinate
	elif basis is None:
		basis_mat = None
	else:
		raise TypeError("Argument basis must be a numpy array or matrix, or None.")

	pdfpages = PdfPages(filename) if filename is not None and filename != "" else None
	files_written = []
	bandchar_failed = 0
	sorted_idx = np.argsort(eival)
	fignums = [None for j in sorted_idx]
	coeff = diagdatapoint.get_eivec_coeff(norb, ll_full = ll_full, ny = ny)
	for j in sorted_idx:
		eivec = eivecs[j]
		energy = eival[j]
		# Accept tuple as well as list: the docstring advertises a 2-tuple for
		# eivalrange; previously a tuple was silently ignored here.
		if eivalrange is not None and isinstance(eivalrange, (tuple, list)) and len(eivalrange) == 2 and (energy < min(eivalrange) or energy > max(eivalrange)):
			continue

		if eivec.shape[0] == norb * ny * nz and ny > 1:	 # for 1D
			eivec0 = np.reshape(eivec, (ny, norb * nz))
			if ll_full:
				abseivec2 = np.abs(eivec0)**2
				ny_sect = np.argmax(np.sum(abseivec2, axis = 1))
			else:
				ny_sect = ny // 2  # take a section in the middle
			eivec = eivec0[ny_sect, :]
			nrm = np.vdot(eivec, eivec)
			if not ll_full:
				eivec /= np.sqrt(nrm)  # normalize only for proper 1D, but not LL
			one_d = True
			# print (eivec.shape)
		elif eivec.shape[0] == norb * nz:			# for 2D
			one_d = False
		else:
			sys.stderr.write("ERROR (ploto.wavefunction_z): Eigenvectors have incorrect number of components\n")
			exit(1)

		# Apply basis transformation
		if basis is not None:
			eivec = basis_mat @ eivec

		z = params.zvalues_nm()
		zint = params.interface_z_nm()

		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)
		plt.plot([z.min(), z.max()], [0, 0], 'k-')
		allplots = []
		legendlabels = []

		phase = 1
		if phase_rotate is True:
			# Try to make largest component purely real
			psimax = eivec[np.argmax(np.abs(eivec))]
			phase = psimax / abs(psimax)
		elif phase_rotate == 'k':
			# Rotate by momentum vector phase (slightly experimental/heuristic)
			if isinstance(diagdatapoint.k, Vector):
				orbmax = np.argmax(np.abs(eivec)) % norb
				k, kphi = diagdatapoint.k.polar(deg = False, fold = True)
				if abs(k) < 1e-7:
					kphi = 0
				elif k < 0:  # not sure why this is necessary, in view of 'fold = true'
					kphi = np.mod(kphi, 2 * np.pi) - np.pi
				jz = np.array([0.5, -0.5, 1.5, 0.5, -0.5, -1.5, 0.5, -0.5][orbmax])
				phase = np.exp(1.j * jz * kphi)
			elif not phase_rotate_warning_given:
				phase_rotate_warning_given = True
				sys.stderr.write("Warning (ploto.wavefunction_z): Rotation by momentum phase was requested, but momentum not given as Vector instance.\n")

		phases = np.angle(coeff[j] / phase, deg = False)
		orbsel = []

		for b in range(0, norb):
			psi = eivec[b::norb]
			psi2 = np.vdot(psi, psi)
			orblabel = "$|" + orblabels[b].strip('$').lstrip('$') + "\\rangle$"
			if psi2 > 5e-3:
				re_max = np.amax(np.abs(np.real(psi / phase)))
				im_max = np.amax(np.abs(np.imag(psi / phase)))
				if get_config_bool('plot_wf_orbitals_realshift'):  # Plot all orbital components shifted to real functions
					thisplot, = plt.plot(z, np.real(psi * np.exp(-1j * phases[b]) / phase) / np.sqrt(dz),
					                     linestyle = orb_ls[b], color = colors[b])  # Note normalization
				elif im_max < 1e-5:  # purely real
					thisplot, = plt.plot(z, np.real(psi / phase) / np.sqrt(dz), '-', color = colors[b])  # Note normalization
				elif re_max < 1e-5:  # purely imaginary
					thisplot, = plt.plot(z, np.imag(psi / phase) / np.sqrt(dz), '--', color = colors[b])  # Note normalization
				else:  # general complex
					if np.amax(np.abs(np.real(psi / phase) - np.imag(psi / phase)) / np.sqrt(dz)) < 1e-5:  # overlapping re and im curves: dashdot
						thisplot, = plt.plot(z, np.real(psi / phase) / np.sqrt(dz), '-.', color = colors[b])
					else:  # non-overlapping re and im curves: solid and dashed
						thisplot, = plt.plot(z, np.real(psi / phase) / np.sqrt(dz), '-', color = colors[b])  # Note normalization
						plt.plot(z, np.imag(psi / phase) / np.sqrt(dz), '--', color = colors[b])   # Note normalization
				allplots.append(thisplot)
				legendlabels.append(orblabel + (" %i%%" % np.floor(np.real(psi2) * 100 + 0.5)))
				orbsel.append(True)

			else:
				# Negligible orbital weight: invisible placeholder plot so that
				# the legend still lists all orbitals. Note np.nan: the np.NaN
				# alias was removed in NumPy 2.0.
				thisplot, = plt.plot(np.nan, np.nan, '-', color='none')
				# allplots.append(None)
				allplots.append(thisplot)
				legendlabels.append(orblabel + (" %i%%" % 0))
				orbsel.append(False)

		text_y = 0.76
		for osel, phi, col in zip(orbsel, np.rad2deg(phases), colors):
			if osel:
				ax.text(0.98, text_y, "%4i\u00b0" % np.round(phi), ha='right', va='top', color = col, transform=ax.transAxes)
				text_y -= 0.04

		bandlabel = get_bandlabel(diagdatapoint, bandlabels, j)
		# Parentheses clarify the precedence ((A and B) or C), which is the
		# behavior as before: any empty label also counts as a failure.
		if (bandlabels is None and bandlabel == '??') or bandlabel == '':
			bandchar_failed += 1

		# ymax = 0.5
		# Estimate well width and subsequently an estimate for the maximum of psi(z)
		l_well = z.max() - z.min() if params.nlayer <= 1 else zint[2] - zint[1] if params.nlayer <= 3 else zint[-2] - zint[1]
		ymax = np.sqrt(2.0 / l_well)
		for zi in zint[1:-1]:
			plt.plot([zi, zi], [-ymax, ymax], 'k:')
		plt.axis([z.min(), z.max(), -1.2*ymax, 1.8*ymax])

		allplots_sorted, legendlabels_sorted = reorder_legend(allplots, legendlabels, order = None)
		if norb == 8:
			ax.legend(handles = allplots_sorted, labels = legendlabels_sorted, loc='upper right', ncol = 3, fontsize = 'small', columnspacing = 1.0, handlelength = 1.6, handletextpad = 0.5)
		else:
			ax.legend(handles = allplots_sorted, labels = legendlabels_sorted, loc='upper right', ncol = 2)

		# if (title is not None) and (title != ""):
		title = "$E=%.3f\\;\\mathrm{meV}$" % energy
		text_y = 0.97
		ax.text(0.02, text_y, title, ha='left', va='top', transform=ax.transAxes)
		text_y -= 0.07
		if one_d:
			if ll_full:
				text_y = display_parameter_text(ny_sect - 2, var = r'\mathrm{LL}', ax = ax, text_y = text_y)
				ax.text(0.02, text_y, r'$|\psi_\mathrm{LL}|^2 = %.4f$' % np.real(nrm), ha='left', va='top', transform=ax.transAxes)
				text_y -= 0.07
			else:
				text_y = display_parameter_text(0, var = 'y', ax = ax, text_y = text_y)
		else:
			text_y = display_parameter_text(display_k, ax = ax, text_y = text_y)
		ax.text(0.02, text_y, bandlabel, ha='left', va='top', transform=ax.transAxes)

		# material labels
		mat_lab_rot = get_config_num('plot_wf_mat_label_rot')
		mat_min_thick = get_config_num('plot_wf_mat_min_thick_label')
		for n in range(0, params.nlayer):
			d = params.layerstack.thicknesses_z[n]
			if d > (z.max() - z.min()) * mat_min_thick:
				mat = params.layerstack.materials[n]['material'].format('tex')
				zl = 0.5 * (params.layerstack.zinterface_nm[n] + params.layerstack.zinterface_nm[n + 1]) - 0.5 * params.lz_thick
				ax.text((zl - z.min()) / (z.max() - z.min()), 0.05, mat, ha='center', va='bottom', rotation=mat_lab_rot, transform=ax.transAxes)

		# if filename != "" and filename is not None:

		elabel = ('+%03i' % (np.floor(energy + 0.5))) if energy > 0 else ('-%03i' % (-np.floor(energy + 0.5)))

		plt.xlabel("$z$")
		plt.ylabel("$\\psi_i(z)$")

		# Disambiguate filenames when several states share energy and label
		fileid = 1
		while [elabel, bandlabel.lower(), fileid] in files_written:
			fileid += 1
		files_written.append([elabel, bandlabel.lower(), fileid])

		if pdfpages is None:
			plt.savefig("wf_%smev_%s%s.pdf" % (elabel, bandlabel.lower().replace("?", "x").replace(" ", ""), "" if fileid == 1 else fileid))
		else:
			pdfpages.savefig(fig)
		fignums[j] = fig.number
		if not remember:
			plt.close()

	if bandchar_failed > 0 and not suppress_character_warning:
		sys.stderr.write("Warning (ploto.wavefunction_z): Cannot determine band character for %i wave functions.\n" % bandchar_failed)
	if pdfpages is not None:
		pdfpages.close()

	return fignums if remember else None
+
def abs_wavefunctions_z(
		params, diagdatapoint, filename = "", absval = False, title = None,
		eivalrange = None, num = 12, bandlabels = None, display_k = None,
		basis = None, basislabels = None, phase_rotate = True, ll_full = False,
		remember = False):
	"""Plot wave functions (absolute value squared) as function of z.
	Plot multiple states together.

	Arguments:
	params         PhysParams instance
	diagdatapoint  DiagDataPoint instance. For eigenvalues, eigenvectors, and
	               labels.
	filename       Output filename. If None or the empty string, save to a
	               default filename.
	absval         NOT USED
	title          NOT USED
	eivalrange     None or a 2-tuple. If set, do not plot wave functions for the
	               states whose eigenvalues lie outside this range.
	num            The number of states to be plotted. These will be the states
	               closest to the centre of eivalrange (or to 0 if eivalrange is
	               None).
	bandlabels     Labels that will be drawn on the plots. If None, determine
	               automatically. If a string, use one label for all states. If
	               a list or array of strings, use different labels for the
	               states. If a tuple of the form (string, list of strings),
	               apply first element as a formatter for the strings in the
	               list.
	basis          IGNORED
	basislabels    IGNORED
	phase_rotate   IGNORED
	display_k      None, dict or a Vector instance. If a Vector, show the value.
	               If a dict, show '$key=value$' joined with commas. If None, do
	               not show.
	ll_full        True or False. Set to True for full LL mode, else False. The
	               effect is that the 'y' value at which the section is taken
	               (in full LL mode, this is the LL index) is where the integral
	               $\\int |\\psi(z, y)|^2 dz$ is maximal. In other cases, the
	               section is taken at y = 0 (at the center).
	remember       True or False. If False (default), close the figure with
	               plt.close(). If True, do not close the figure, so that it can
	               be modified in the future. The figure is saved regardless.

	Note:
	The arguments labelled as ignored, are included only to make the argument
	lists between wavefunction_z() and abs_wavefunctions_z() identical.

	Returns:
	fig   Figure number when successful. None if an error occurs, if there is no
	      data, or Figure object has been closed (if argument remember is
	      False).
	"""
	eival = diagdatapoint.eival
	eivecs = diagdatapoint.eivec
	if eivecs is None:
		sys.stderr.write("ERROR (ploto.abs_wavefunctions_z): Eigenvector data is missing.\n")
		return None
	nz = params.nz
	ny = params.ny
	dz = params.zres
	norb = params.norbitals
	suppress_character_warning = (diagdatapoint.k != 0)

	# Bring eigenvectors into (state, component) order; the number of
	# components determines whether this is 1D (norb * ny * nz) or 2D
	# (norb * nz) data.
	if eivecs.shape[0] == norb * ny * nz:		# for 1D
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * ny * nz:	    # for 1D, inverted order
		# neig = eivecs.shape[0]
		pass
	elif eivecs.shape[0] == norb * nz:			# for 2D
		ny = 1
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * nz:			# for 2D, inverted order
		ny = 1
		# neig = eivecs.shape[0]
	else:
		sys.stderr.write("ERROR (ploto.abs_wavefunctions_z): Eigenvectors have incorrect number of components\n")
		exit(1)

	# Select up to num states closest to the centre energy e0, then order them
	# by eigenvalue.
	if eivalrange is None:
		e0 = 0.0
		sel = np.argsort(np.abs(eival - e0))  # [:min(neig, num)]
		if num < len(sel):
			sel = sel[:num]   # restrict to maximum number
		order = np.argsort(eival[sel])
		sel = sel[order]
	else:
		# Midpoint of the range; true division (// would floor the float
		# midpoint and shift the selection centre).
		e0 = (min(eivalrange) + max(eivalrange)) / 2
		sel = np.argsort(np.abs(eival - e0))  # sort by distance to e0
		sel = sel[(eival[sel] >= min(eivalrange)) & (eival[sel] <= max(eivalrange))]  # restrict to eigenvalue range
		if num < len(sel):
			sel = sel[:num]   # restrict to maximum number
		order = np.argsort(eival[sel])
		sel = sel[order]

	if len(sel) == 0:
		sys.stderr.write("Warning (ploto.abs_wavefunctions_z): No eigenstates to be plotted\n")
		return None

	colors = ['r', 'c', 'b', 'g', 'm', 'y']
	styles = ['-', '--', ':', '-.']
	allplots = []
	legendlabels = []
	jj = 0
	z = params.zvalues_nm()
	zint = params.interface_z_nm()
	ymax = 0.0

	fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
	plt.subplots_adjust(**get_plot_size('subplot'))
	ax = fig.add_subplot(1, 1, 1)

	psi2_prev = None
	energy_prev = None
	bandlabel_prev = None
	# NOTE(review): bandchar_failed is never incremented in this function, so
	# the warning below is currently unreachable; kept for parity with
	# wavefunction_z().
	bandchar_failed = 0
	for j in sel:
		eivec = eivecs[j]
		energy = eival[j]
		bandlabel = get_bandlabel(diagdatapoint, bandlabels, j)
		elabel = ('+%03i' % (np.floor(energy + 0.5))) if energy > 0 else ('-%03i' % (-np.floor(energy + 0.5)))

		if eivec.shape[0] == norb * ny * nz and ny > 1:  # for 1D
			eivec0 = np.reshape(eivec, (ny, norb * nz))
			if ll_full:
				abseivec2 = np.abs(eivec0)**2
				ny_sect = np.argmax(np.sum(abseivec2, axis = 1))
			else:
				ny_sect = ny // 2  # take a section in the middle
			eivec = eivec0[ny_sect, :]
			nrm = np.vdot(eivec, eivec)
			if not ll_full:
				eivec /= np.sqrt(nrm)  # normalize only for proper 1D, but not LL
			one_d = True
			# print (eivec.shape)
		elif eivec.shape[0] == norb * nz:			# for 2D
			one_d = False
		else:
			sys.stderr.write("ERROR (ploto.abs_wavefunctions_z): Eigenvectors have incorrect number of components\n")
			exit(1)

		eivec2 = np.real(eivec.conjugate() * eivec)  # Not a matrix multiplication!
		eivec2a = eivec2.reshape(nz, norb, order = 'C')
		psi2 = np.sum(eivec2a, axis = 1) / dz

		# check if eigenstate is "twin" of previous one
		if psi2_prev is not None and bandlabels is None:
			equal_energy = abs(energy_prev - energy) <= 0.1
			equal_bandlabel = (bandlabel[0] != '?' and bandlabel_prev[0] != '?') and (bandlabel[:-1] == bandlabel_prev[:-1]) and (bandlabel[-1] + bandlabel_prev[-1] in ['+-', '-+'])
			if equal_energy and equal_bandlabel:
				psi2diff = np.abs(psi2_prev - psi2)
				if np.amax(psi2diff) < 1e-4:
					legendlabels[-1] = legendlabels[-1][:-1] + '\u00B1'  # "+-" plus-minus
					continue  # do not add plot
	
		psi2_prev = psi2
		energy_prev = energy
		bandlabel_prev = bandlabel

		p, = plt.plot(z, psi2, colors[jj % 6] + styles[(jj % 24) // 6])
		allplots.append(p)

		legendlabels.append(elabel + " " + bandlabel)

		ymax = max(ymax, np.amax(psi2))
		jj += 1

	if bandchar_failed > 0 and not suppress_character_warning:
		sys.stderr.write("Warning (ploto.abs_wavefunctions_z): Cannot determine band character for %i wave functions.\n" % bandchar_failed)

	plt.plot([z.min(), z.max()], [0, 0], 'k-')
	for zi in zint[1:-1]:
		plt.plot([zi, zi], [-0.1 * ymax, 1.1 * ymax], 'k:')
	plt.axis([z.min(), z.max(), -0.2 * ymax, 1.3 * ymax])
	set_xlabel('$z$', '$\\mathrm{nm}$')
	plt.ylabel('$|\\psi(z)|^2$')

	ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol=2)

	# if (title is not None) and (title != ""):

	title = "$%.3f\\;\\mathrm{meV}\\leq E \\leq %.3f\\;\\mathrm{meV}$" % (min(eival[sel]), max(eival[sel]))
	text_y = 0.97
	ax.text(0.02, text_y, title, ha='left', va='top', transform=ax.transAxes)
	text_y -= 0.07
	if one_d:
		if ll_full:
			ax.text(0.02, text_y, r"$\mathrm{LL}$ with $\max|\psi_\mathrm{LL}|^2$", ha='left', va='top', transform=ax.transAxes)
			text_y -= 0.07
		else:
			text_y = display_parameter_text(0, var = 'y', ax = ax, text_y = text_y)
	else:
		text_y = display_parameter_text(display_k, ax = ax, text_y = text_y)

	# material labels
	for n in range(0, params.nlayer):
		d = params.layerstack.thicknesses_z[n]
		if d > (z.max() - z.min()) * 0.15:
			mat = params.layerstack.materials[n]['material'].format('tex')
			zl = 0.5 * (params.layerstack.zinterface_nm[n] + params.layerstack.zinterface_nm[n + 1]) - 0.5 * params.lz_thick
			ax.text((zl - z.min()) / (z.max() - z.min()), 0.05, mat, ha='center', va='bottom', transform=ax.transAxes)

	if filename != "" and filename is not None:
		plt.savefig(filename)
	if not remember:
		plt.close()

	return fig.number if remember else None
+
+
def abs_wavefunctions_y(params, diagdatapoint, filename = "", absval = False, title = None, eivalrange = None, bandlabels = None, overlap_eivec = None, obsy = None, display_k = None, magn = None, remember = False):
	"""Plot wave functions (absolute value squared) as function of y.
	Generate a multipage PDF where each figure represents a state. Decompose the
	states into orbitals or subbands.

	Arguments:
	params       PhysParams instance
	diagdatapoint  DiagDataPoint instance. For eigenvalues, eigenvectors, and
	               labels.
	filename     If None or empty string, then save to separate files.
	             Otherwise, save a multipage PDF with the given filename.
	absval       NOT USED
	title        NOT USED
	eivalrange   None or a 2-tuple. If set, do not plot wave functions for the
	             states whose eigenvalues lie outside this range.
	bandlabels   Labels that will be drawn on the plots. If None, do not show.
	             If a string, use one label for all states. If a list or array
	             of strings, use different labels for the states. If a tuple of
	             the form (string, list of strings), apply first element as
	             a formatter for the strings in the list.
	overlap_eivec  A dict instance. The keys are the subband labels, the values
	               are arrays representing the eigenvector. If given, decompose
	               the state into subbands. If set to None, decompose into the
	               orbitals.
	obsy         An array of dimension 1 or 2 that contains the observable
	             values <y> (and <y^2>) for the states. If one-dimensional or
	             two-dimensional with one row, the values are interpreted as
	             <y>. If two-dimensional with a second row, the second row are
	             the values of <y^2>. For each state, show the values <y> and
	             sigma_y also if <y^2> is given.
	display_k    None, dict or a Vector instance. If a Vector, show the value.
	             If a dict, show '$key=value$' joined with commas. If None, do
	             not show.
	magn         Numeric value (float or int). If set, use this magnetic field
	             value for scaling the vertical axis, if the scaling type
	             (configuration value 'plot_wf_y_scale') is 'magn'.
	remember     True or False. If False (default), close each figure with
	             plt.close(). If True, do not close the figures, so that they
	             can be modified in the future. The figures are saved
	             regardless.

	Returns:
	fig   List of figure numbers when successful. None if an error occurs, if
	      there is no data, or Figure objects have been closed (if argument
	      remember is False).
	"""
	remember = False  # TODO: remember = True not yet supported by this function
	eival = diagdatapoint.eival
	eivecs = diagdatapoint.eivec
	if eivecs is None:
		sys.stderr.write("ERROR (ploto.abs_wavefunctions_y): Eigenvector data is missing.\n")
		return None
	nz = params.nz
	ny = params.ny
	dy = params.yres
	norb = params.norbitals
	y = params.yvalues_nm()

	# Accept eigenvectors with the state index on either axis; normalize to
	# shape (neig, ncomponents) by transposing where needed.
	if eivecs.shape[0] == norb * ny * nz:		# for 1D
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * ny * nz:	    # for 1D, inverted order
		# neig = eivecs.shape[0]
		pass
	elif eivecs.shape[0] == norb * nz:			# for 2D
		ny = 1
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * nz:			# for 2D, inverted order
		ny = 1
		# neig = eivecs.shape[0]
	else:
		sys.stderr.write("ERROR (ploto.abs_wavefunctions_y): Eigenvectors have incorrect number of components\n")
		sys.exit(1)  # sys.exit, not the site-module builtin exit()

	if ny <= 1:
		return None

	pdfpages = PdfPages(filename) if filename is not None and filename != "" else None
	files_written = []
	orbcolors = ['r', 'c', 'b', 'g', 'm', 'y', '#3fdf3f', '#ff7fff']
	orblabels = orbital_labels(style = 'tex')
	# Determine colors for subband overlap
	if overlap_eivec is not None:
		ov_bands = []
		for ov in overlap_eivec:
			if len(ov) >= 3 and ov[0] in 'eElLhH' and ov[1] in '123456789':
				ov1 = ov[0].upper() + ov[1]
				if ov1 not in ov_bands:
					ov_bands.append(ov1)
		ov_bands = sorted(ov_bands)
		subcolors = {}
		if len(ov_bands) == 2:
			subcolors[ov_bands[0]] = 'r'
			subcolors[ov_bands[1]] = 'b'
		elif len(ov_bands) <= 6:
			for jj in range(0, len(ov_bands)):
				subcolors[ov_bands[jj]] = 'rgbymc'[jj]
		else:
			# More than six subbands: distribute hues evenly over the circle
			for jj in range(0, len(ov_bands)):
				subcolors[ov_bands[jj]] = hsl_to_rgb([jj / len(ov_bands), 1.0, 0.5])

	# Vertical scale: vertical range is [-0.2 * vmax, 1.3 * vmax]
	vscale = get_config('plot_wf_y_scale', choices = ['size', 'width', 'magn', 'separate', 'together'])
	if vscale in ['size', 'width']:
		vmax = 2.5 / params.ly_width
	elif vscale == 'magn':
		if not isinstance(magn, (float, np.floating, int, np.integer)):
			sys.stderr.write("Warning (ploto.abs_wavefunctions_y): Scaling by magnetic length requires magnetic field value to be numeric.\n")
			vmax = 2.5 / params.ly_width
		else:
			print('size:', 2.5 / params.ly_width, '| magn:', 1.25 * np.sqrt(eoverhbar * abs(magn) / np.pi))
			vmax = max(2.5 / params.ly_width, 1.25 * np.sqrt(eoverhbar * abs(magn) / np.pi))
	elif vscale == 'separate':
		pass  # vmax is determined per state, inside the loop below
	elif vscale == 'together':
		# Common scale: maximum |psi|^2 density over all states
		vmax = 0.0
		for eivec in eivecs:
			psi = np.reshape(eivec, (ny, nz * norb))
			psi2max = np.amax(np.sum(np.abs(psi)**2, axis = 1))
			vmax = max(vmax, 1.1 * psi2max / dy)
	else:
		sys.stderr.write("Warning (ploto.abs_wavefunctions_y): Invalid value for configuration option 'plot_wf_y_scale'. Use default 'size'.\n")
		vmax = 2.5 / params.ly_width

	sorted_idx = np.argsort(eival)
	fignums = [None for _ in sorted_idx]
	for j in sorted_idx:
		eivec = eivecs[j]
		energy = eival[j]
		if eivalrange is not None and isinstance(eivalrange, list) and len(eivalrange) == 2 and (energy < min(eivalrange) or energy > max(eivalrange)):
			continue

		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)
		plt.plot([y.min(), y.max()], [0, 0], 'k-')
		allplots = []
		legendlabels = []
		if overlap_eivec is None:   # Orbital overlap
			eivec = np.reshape(eivec, (ny, nz, norb))
			# print()
			for b in range(0, norb):
				psi = eivec[:, :, b]
				# print ('%i:' %(b+1), psi.shape, '->',)
				psi2 = np.sum(np.abs(psi)**2, axis = 1)
				# print (psi2.shape, 'sum=', psi2.sum())
				orblabel = "$|" + orblabels[b].strip('$').lstrip('$') + "\\rangle$"
				if psi2.sum() > 5e-3:
					thisplot, = plt.plot(y, psi2 / dy, '-', color = orbcolors[b])
					allplots.append(thisplot)
					legendlabels.append(orblabel + (" %i%%" % np.floor(psi2.sum() * 100 + 0.5)))

				else:
					# Invisible placeholder plot, so that the legend shows all orbitals
					# np.nan (lowercase): np.NaN was removed in NumPy 2.0
					thisplot, = plt.plot(np.nan, np.nan, '-', color='none')
					allplots.append(thisplot)
					legendlabels.append(orblabel + (" %i%%" % 0))
			# total
			psi2 = np.sum(np.abs(eivec)**2, axis = (1, 2))
			thisplot, = plt.plot(y, psi2 / dy, 'k-')
			allplots.append(thisplot)
			legendlabels.append("sum")
			if vscale == 'separate':
				vmax = 1.1 * np.amax(psi2) / dy
		else:   # Subband overlap
			eivec = np.reshape(eivec, (ny, nz * norb))
			absv2 = np.sum(np.abs(eivec)**2)
			total_ei = np.sum(np.abs(eivec)**2, axis=1) / absv2
			total_ov = np.zeros_like(total_ei)
			for ov in overlap_eivec:      # overlap_eivec should be a dict
				ovec = overlap_eivec[ov]  # this is the data; argument ov is the label
				sublabel = ov[0:2] if len(ov) >= 2 else ''
				col = subcolors.get(sublabel, 'k')
				fmt = '-' if '+' in ov else '--' if '-' in ov else ':'
				absw2 = np.sum(np.abs(ovec)**2)
				psi = np.inner(eivec.conjugate(), ovec)
				# print ('%i (%s):' % (jj+1, ov), eivec.shape, ovec.shape, '->', psi.shape, '->')
				psi2 = np.abs(psi)**2 / absv2 / absw2
				total_ov += psi2
				# print (psi2.shape)
				if psi2.sum() > 5e-3:
					thisplot, = plt.plot(y, psi2 / dy, fmt, color = col)
					allplots.append(thisplot)
					legendlabels.append(ov + (" %i%%" % np.floor(psi2.sum() * 100 + 0.5)))
				else:
					thisplot, = plt.plot(np.nan, np.nan, fmt, color='none')
					allplots.append(thisplot)
					legendlabels.append(ov + (" %i%%" % 0))
			# Remainder: weight not captured by any of the given subbands
			other_ov = total_ei - total_ov
			if other_ov.sum() > 5e-3:
				thisplot, = plt.plot(y, other_ov / dy, 'k:')
				allplots.append(thisplot)
				legendlabels.append("other" + (" %i%%" % np.floor(other_ov.sum() * 100 + 0.5)))
			else:
				thisplot, = plt.plot(np.nan, np.nan, ':', color='none')
				allplots.append(thisplot)
				legendlabels.append("other" + (" %i%%" % 0))
			thisplot, = plt.plot(y, total_ei / dy, 'k-')
			allplots.append(thisplot)
			legendlabels.append("sum")
			if vscale == 'separate':
				vmax = 1.1 * np.amax(total_ei) / dy

		plt.axis([y.min(), y.max(), -0.2 * vmax, 1.3 * vmax])

		if overlap_eivec is not None:
			# Order the legend: '+' subbands, 'other', '-' subbands, 'sum'
			sortedlabelsp = sorted([ll for ll in legendlabels if '+' in ll])
			sortedlabelsm = sorted([ll for ll in legendlabels if '-' in ll])
			otherlabel = [ll for ll in legendlabels if 'other' in ll]
			sumlabel = [ll for ll in legendlabels if 'sum' in ll]
			sortedlabels = sortedlabelsp + otherlabel + sortedlabelsm + sumlabel
			sortedhandles = [allplots[legendlabels.index(ll)] for ll in sortedlabels]
			sortedlabels = [ll.replace('-', '\u2212') for ll in sortedlabels]
			ax.legend(handles = sortedhandles, labels = sortedlabels, loc='upper right', ncol = 2, fontsize = 'small', columnspacing = 1.0, handlelength = 1.6, labelspacing = None if len(sortedlabels) <= 8 else 0.15, handletextpad = 0.5)
		elif norb == 8:
			ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol = 3, fontsize = 'small', columnspacing = 1.0, handlelength = 1.6, handletextpad = 0.5)
		else:
			ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol = 2)

		bandlabel = '??'
		title = "$E=%.3f\\;\\mathrm{meV}$" % energy
		ax.text(0.02, 0.97, title, ha='left', va='top', transform=ax.transAxes)
		# ax.text(0.02, 0.90, bandlabel, ha='left', va='top', transform=ax.transAxes)
		display_parameter_text(display_k, ax = ax, text_y = 0.90)
		yavglabel = ''
		if isinstance(obsy, np.ndarray) and obsy.ndim == 2:
			if obsy.shape[0] >= 1 and abs(obsy[0, j]) > 0.3:
				yavglabel = "$\\langle y\\rangle = %.1f\\,\\mathrm{nm}$" % (np.real(obsy[0, j]) * params.ly_width)
			if obsy.shape[0] >= 2 and abs(obsy[0, j]) > 0.3:
				sigmay = np.sqrt(np.real(obsy[1, j]) - np.real(obsy[0, j])**2)
				yavglabel += ", $\\sigma_y = %.1f\\,\\mathrm{nm}$" % (sigmay * params.ly_width)
		elif isinstance(obsy, np.ndarray) and obsy.ndim == 1 and abs(obsy[j]) > 0.3:
			yavglabel = "$\\langle y\\rangle = %.1f\\,\\mathrm{nm}$" % (np.real(obsy[j]) * params.ly_width)
		if len(yavglabel) > 0:
			ax.text(0.02, 0.83, yavglabel, ha='left', va='top', transform=ax.transAxes)

		elabel = ('+%03i' % (np.floor(energy + 0.5))) if energy > 0 else ('-%03i' % (-np.floor(energy + 0.5)))

		set_xlabel("$y$", "$\\mathrm{nm}$")
		plt.ylabel("$|\\psi_i|^2(y)$")

		# Make the (energy, band label) file id unique for degenerate states
		fileid = 1
		while [elabel, bandlabel.lower(), fileid] in files_written:
			fileid += 1
		files_written.append([elabel, bandlabel.lower(), fileid])

		if pdfpages is None:
			plt.savefig("wf_y_%smev.%s_%s.pdf" % (elabel, bandlabel.lower().replace("?", "x"), fileid))
		else:
			pdfpages.savefig(fig)
		fignums[j] = fig.number
		if not remember:
			plt.close()

	if pdfpages is not None:
		pdfpages.close()

	return fignums if remember else None
+
def wavefunction_zy(params, diagdatapoint, filename = "", absval = True, separate_bands = False, title = None, eivalrange = None, display_k = None, remember = False):
	"""Plot wave functions as function of (z, y).
	The colouring may be a color map for the absolute value squared, or a
	colour mixing determined for displaying the orbital content. (Detailed
	settings via configuration values.)

	Arguments:
	params       PhysParams instance
	diagdatapoint  DiagDataPoint instance. For eigenvalues, eigenvectors, and
	               labels.
	filename     If None or empty string, then save to separate files.
	             Otherwise, save a multipage PDF with the given filename.
	absval       NOT USED
	separate_bands  If False, use the absolute value square for the colouring.
	                If True, mix colours depending on orbital composition.
	title        NOT USED
	eivalrange   None or a 2-tuple. If set, do not plot wave functions for the
	             states whose eigenvalues lie outside this range.
	display_k    None, dict or a Vector instance. If a Vector, show the value.
	             If a dict, show '$key=value$' joined with commas. If None, do
	             not show.
	remember     True or False. If False (default), close each figure with
	             plt.close(). If True, do not close the figures, so that they
	             can be modified in the future. The figures are saved
	             regardless.

	Returns:
	fig   List of figure numbers when successful. None if an error occurs, if
	      there is no data, or Figure objects have been closed (if argument
	      remember is False).
	"""
	eival = diagdatapoint.eival
	eivecs = diagdatapoint.eivec
	if eivecs is None:
		sys.stderr.write("ERROR (ploto.wavefunction_zy): Eigenvector data is missing.\n")
		return None
	nz = params.nz
	ny = params.ny
	norb = params.norbitals
	z = params.zvalues_nm(extend = 1)
	zint = params.interface_z_nm()
	y = params.yvalues_nm(extend = 1)

	# Accept eigenvectors with the state index on either axis; normalize to
	# shape (neig, ncomponents) by transposing where needed.
	if eivecs.shape[0] == norb * ny * nz:    # for 1D
		# neig = eivecs.shape[1]
		eivecs = eivecs.T
	elif eivecs.shape[1] == norb * ny * nz:	 # for 1D, inverted order
		# neig = eivecs.shape[0]
		pass
	else:
		sys.stderr.write("ERROR (ploto.wavefunction_zy): Eigenvectors have incorrect number of components; must be \"1D\", i.e., with two confinement directions\n")
		sys.exit(1)  # sys.exit, not the site-module builtin exit()

	mode = get_config('plot_wf_zy_format', choices = ['pdf', 'png', 'pngtopdf', 'png_to_pdf'])
	pdfpages = PdfPages(filename) if mode == 'pdf' and filename is not None and filename != "" else None
	if separate_bands:
		color = get_config('plot_wf_zy_bandcolors', choices = ['hsl', 'hsv', 'rgb'])
	else:
		color = get_config('color_wf_zy')
	scaletype = get_config('plot_wf_zy_scale', choices = ['separate', 'together'])

	# Count labels for determination of file name patterns
	files_written = []
	n_elabels = {}
	j_elabels = {}
	for energy in eival:
		elabel = ('+%03i' % (np.floor(energy + 0.5))) if energy > 0 else ('-%03i' % (-np.floor(energy + 0.5)))
		if elabel in n_elabels:
			n_elabels[elabel] += 1
		else:
			n_elabels[elabel] = 1
			j_elabels[elabel] = 0

	# Determine maximum of all eigenvectors
	psi2_max_all = 0
	for eivec in eivecs:
		eivec2 = eivec.conjugate() * eivec
		eivec2o = np.real(eivec2.reshape(ny * nz, norb))
		if separate_bands:
			psi2_max = np.amax(eivec2o)
		else:
			psi2_max = np.amax(np.sum(eivec2o, axis=1))
		psi2_max_all = max(psi2_max_all, psi2_max)

	sorted_idx = np.argsort(eival)
	fignums = [None for _ in sorted_idx]
	for j in sorted_idx:
		eivec = eivecs[j]
		energy = eival[j]
		if eivalrange is not None and isinstance(eivalrange, list) and len(eivalrange) == 2 and (energy < min(eivalrange) or energy > max(eivalrange)):
			continue

		fig = plt.figure(get_fignum(), figsize = get_plot_size('s'))
		plt.subplots_adjust(**get_plot_size('subplot'))
		ax = fig.add_subplot(1, 1, 1)

		eivec2 = eivec.conjugate() * eivec
		if separate_bands:
			psi2_zy_all = np.transpose(np.real(eivec2.reshape(ny, nz, norb)), (1, 0, 2))
			# print (psi2_zy_all.shape)
			psi2_zy_g6  = psi2_zy_all[:, :, 0] + psi2_zy_all[:, :, 1]
			psi2_zy_g8h = psi2_zy_all[:, :, 2] + psi2_zy_all[:, :, 5]
			psi2_zy_g8l = psi2_zy_all[:, :, 3] + psi2_zy_all[:, :, 4]
			psi2_zy = np.sum(psi2_zy_all, axis=2)
			# psi2_max = max(psi2_zy_g6.max(), psi2_zy_g8h.max(), psi2_zy_g8l.max())
			psi2_max = psi2_zy.max()
			vmax = psi2_max_all if scaletype == 'together' else psi2_max

			# Extract R,G,B channels (for HSV and HSL models)
			zz = np.zeros_like(psi2_zy)
			rr = np.where(psi2_zy == 0, zz, psi2_zy_g6 / psi2_zy)
			gg = np.where(psi2_zy == 0, zz, psi2_zy_g8l / psi2_zy)
			bb = np.where(psi2_zy == 0, zz, psi2_zy_g8h / psi2_zy)
			rgb = np.dstack((rr, gg, bb))

			if color == 'hsv':
				# HSV color model
				hh = mplcolors.rgb_to_hsv(rgb)[:, :, 0]
				hsv = np.dstack((hh, (psi2_zy / vmax)**2, np.ones_like(psi2_zy)))
				rgb = mplcolors.hsv_to_rgb(hsv)
			elif color == 'hsl':
				# HSL color model (default)
				hh = rgb_to_hsl(rgb)[:, :, 0]
				hsl = np.dstack((hh, np.ones_like(psi2_zy), 1 - 0.5 * (psi2_zy / vmax)**2))
				rgb = hsl_to_rgb(hsl)
			elif color == 'rgb':
				# Simple RGB color model (inversion is required to map zero to white)
				rr = 1.0 - (psi2_zy_g8l + psi2_zy_g8h) / vmax  # 1 - anti-Gamma6
				gg = 1.0 - (psi2_zy_g6 + psi2_zy_g8h) / vmax  # 1 - anti-Gamma8L
				bb = 1.0 - (psi2_zy_g6 + psi2_zy_g8l) / vmax  # 1 - anti-Gamma8H
				rgb = np.dstack((rr, gg, bb))
			else:
				raise ValueError("Invalid value for variable 'color'")

			expval_y = np.sum(np.sum(psi2_zy, axis = 0) * params.yvalues_nm())
			ax.imshow(np.clip(rgb, 0, 1), interpolation='none', extent=(y.min(), y.max(), z.min(), z.max()), aspect='auto', origin='lower')
		else:
			colormap = try_colormap(color)
			psi2_zy = np.sum(np.real(eivec2.reshape(ny, nz, norb)), axis=2).transpose()
			expval_y = np.sum(np.sum(psi2_zy, axis = 0) * params.yvalues_nm())
			vmax = psi2_max_all if scaletype == 'together' else psi2_zy.max()
			ax.imshow(np.clip(psi2_zy, 0.0, vmax), cmap=colormap, interpolation='none', extent=(y.min(), y.max(), z.min(), z.max()), aspect='auto', vmin=0.0, vmax=vmax, origin='lower')

		# Dotted lines: layer interfaces (horizontal) and <y> (vertical, red)
		ymax = y.max()
		for zi in zint[1:-1]:
			plt.plot([-ymax, ymax], [zi, zi], 'k:')
		plt.plot([expval_y, expval_y], [z.min(), z.max()], 'r:')
		plt.axis([-ymax * 1.05, ymax * 1.05, z.min()*1.05, z.max()*1.05])
		set_ylabel('$z$', '$\\mathrm{nm}$')
		set_xlabel('$y$', '$\\mathrm{nm}$')

		# ax.legend(handles = allplots, labels = legendlabels, loc='upper right', ncol=2)

		# if (title is not None) and (title != ""):
		title = "$E=%.3f\\;\\mathrm{meV}$" % energy
		ax.text(0.02, 0.97, title, ha='left', va='top', transform=ax.transAxes)
		display_parameter_text(display_k, ax = ax, text_y = 0.90)

		# material labels
		for n in range(0, params.nlayer):
			d = params.layerstack.thicknesses_z[n]
			if d > (z.max() - z.min()) * 0.08:
				mat = params.layerstack.materials[n]['material'].format('tex')
				zl = 0.5 * (params.layerstack.zinterface_nm[n] + params.layerstack.zinterface_nm[n + 1]) - 0.5 * params.lz_thick
				ax.text(0.97, (zl - z.min()) / (z.max() - z.min()), mat, ha='right', va='center', transform=ax.transAxes)

		# build the filename
		elabel = ('+%03i' % (np.floor(energy + 0.5))) if energy > 0 else ('-%03i' % (-np.floor(energy + 0.5)))
		elabelj = ""
		if elabel in n_elabels:
			j_elabels[elabel] += 1
			elabelj = "" if n_elabels[elabel] == 1 else ("_%01i" % j_elabels[elabel]) if n_elabels[elabel] < 10 else ("_%02i" % j_elabels[elabel]) if n_elabels[elabel] < 100 else ("_%i" % j_elabels[elabel])
		fname = "wf_%smev%s.%s" % (elabel, elabelj, mode[0:3])  # latter argument is 'png' or 'pdf'
		# and save
		if pdfpages is not None:
			pdfpages.savefig(fig)
		else:
			plt.savefig(fname)
			files_written.append(fname)
		fignums[j] = fig.number
		if not remember:
			plt.close()

	if pdfpages is not None:
		pdfpages.close()
	if mode in ['pngtopdf', 'png_to_pdf']:
		# Guard against filename being None or empty; filename.endswith() would
		# raise AttributeError on None (filename = None is explicitly allowed).
		if not filename or not filename.endswith('.pdf'):
			sys.stderr.write("Warning (ploto.wavefunction_zy): For creation of a multi-page PDF, the file name must end with '.pdf'.\n")
			return None
		sys.stderr.write("Run 'convert <files> %s' ...\n" % filename)
		# f_stdout = open("pdflatex.log",'w')
		try:
			# subp.check_call(["pdflatex", "-interaction=batchmode", filename], stdout = f_stdout)
			subp.check_call(['convert'] + files_written + [filename])
		except OSError:
			sys.stderr.write("convert is not available\n")
		except Exception:
			# Catch Exception, not bare except: do not swallow KeyboardInterrupt
			# or SystemExit raised while 'convert' runs.
			sys.stderr.write("convert has failed; see pdflatex.log\n")
		else:
			sys.stderr.write("convert has completed successfully.\n")
			if get_config_bool('plot_wf_delete_png'):
				delete_intermediate_files(files_written)
		# sys.stderr.write("Warning (ploto.wavefunction_zy): Creation of multi-page PDF from separate PNGs has failed: 'convert' not found.\n")
	return fignums if remember else None
+
def wf_add_bandlabels(params, diagdatapoint, filename = "", bandlabels = None, remember = False):
	"""
	Add band label to existing wave function plot.

	This requires that the original plot function has been used with 'remember =
	True' and that the figure reference(s) have been stored in
	diagdatapoint.wffigure. Presently, this only makes sense with the figures
	from wavefunction_z().

	Arguments:
	params        NOT USED
	diagdatapoint DiagDataPoint instance. For eigenvalues, eigenvectors, and
	              labels.
	filename      If None or empty string, then save to separate files.
	              Otherwise, save a multipage PDF with the given filename.
	bandlabels    Labels that will be drawn on the plots. If None, determine
	              automatically. If a string, use one label for all states. If a
	              list or array of strings, use different labels for the states.
	              If a tuple of the form (string, list of strings), apply first
	              element as a formatter for the strings in the list.
	remember      True or False. If False (default), close each figure with
	              plt.close(). If True, do not close the figures, so that they
	              can be modified in the future. The figures are saved
	              regardless.

	No return value.
	"""
	saved_files = []
	wffig = diagdatapoint.wffigure
	if wffig is None:
		sys.stderr.write("Warning (ploto.wf_add_bandlabels): Adding band labels to existing figures was requested, but the figure data is absent. (For example, if remember = False was set in the plot function.)\n")
		return
	if isinstance(wffig, list):
		pdfpages = PdfPages(filename) if filename is not None and filename != "" else None
		figlist = wffig
		order = np.argsort(diagdatapoint.eival)
	elif isinstance(wffig, (int, np.integer, matplotlib.figure.Figure)):
		pdfpages = None
		figlist = [wffig]
		order = [0]
	else:
		raise TypeError("diagdatapoint.wffigure must be integer, matplotlib Figure object, list, or None.")

	for idx in order:
		if figlist[idx] is None:
			continue
		## NOTE: At the time of writing, each of the wave function plot
		## functions above generates a single Axes object, but if this is
		## changed, care should be taken to obtain the correct Axes object.
		fig = plt.figure(figlist[idx])
		ax = plt.gca()

		# Gather vertical positions of Text objects already sitting in the
		# upper-left corner (x coordinate 0.02) and place the new label below
		# the lowest one.
		corner_ys = [
			child.get_position()[1] for child in ax.get_children()
			if isinstance(child, matplotlib.text.Text) and abs(child.get_position()[0] - 0.02) < 1e-6
		]
		label_y = 0.97 if len(corner_ys) == 0 else min(corner_ys) - 0.07
		if label_y >= 0.03:  # otherwise it doesn't fit
			bandlabel = get_bandlabel(diagdatapoint, bandlabels, idx)
			ax.text(0.02, label_y, bandlabel, ha='left', va='top', transform=ax.transAxes)
		# TODO: Turn 'magic numbers' into named constants (globals for this .py file)

		if pdfpages is not None:
			pdfpages.savefig(fig)
		else:
			plt.savefig(filename)
			saved_files.append(filename)
		if not remember:
			plt.close()
	if pdfpages is not None:
		pdfpages.close()
	if not remember:
		diagdatapoint.wffigure = None
	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/postprocess.py b/kdotpy-v1.0.0/src/kdotpy/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..add485f2af58c991c95abb9b85ac7428109cfaed
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/postprocess.py
@@ -0,0 +1,1567 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os.path
+import numpy as np
+import numpy.linalg as nplin
+
+from .config import get_config, get_config_int, get_config_num, get_config_bool
+from .parallel import parallel_apply
+from .physconst import r_vonklitzing, eoverhbar, cLight, hbar, e_el
+from . import cmdargs
+from .momentum import Vector, VectorGrid
+from .diagonalization import DiagData, DiagDataPoint
+
+from .etransform import ETransform
+from .erange import erange_from_target_eres, get_erange
+from .density import DensityDataByBand, DensityScale
+from .density import integrated_observable, integrated_dos, integrated_dos_by_band
+from .density import local_integrated_dos, integrated_dos_ll
+from .density import densityz_energy, densityz_ll as get_densityz_ll
+from .density import opts_to_broadening
+from .observables import all_observables
+from .phystext import format_unit
+
+from .bhz import do_bhz
+from .bhzprint import tex_print_bhz_matrix
+
+from . import tableo
+from . import ploto
+
+
+### SOME HELPER FUNCTIONS ###
def get_min_xres():
	"""Get minimal horizontal resolution for DOS plots from configuration.
	Returns the minimal number of points along the horizontal axis (momentum
	or magnetic field) that DOS plots should have, as a non-negative integer
	read from the configuration value 'dos_interpolation_points'."""
	min_points = get_config_int('dos_interpolation_points', minval = 0)
	return min_points
+
def get_min_eres():
	"""Get minimal energy resolution for DOS plots from configuration.
	Returns the minimal number of points along the energy axis that DOS plots
	should have, as a non-negative integer read from the configuration value
	'dos_energy_points'."""
	min_points = get_config_int('dos_energy_points', minval = 0)
	return min_points
+
def filename_suffix_from_densval(densval):
	"""Build file name suffixes from density values.

	Each value x maps to 'dens_n<abs(x)>' for positive x, 'dens_p<abs(x)>' for
	negative x, and 'dens_<abs(x)>' for zero, with four decimals. If the
	value-based suffixes are not all distinct, fall back to plain sequential
	labels 'dens_1', 'dens_2', ... instead.

	Argument:
	densval   Sequence (list or array) of density values.

	Returns:
	List of suffix strings, one per input value.
	"""
	suffixes = []
	for x in np.asarray(densval):
		sign_char = "n" if x > 0 else "p" if x < 0 else ""
		suffixes.append("dens_%s%.4f" % (sign_char, abs(x)))
	if len(set(suffixes)) == len(suffixes):
		return suffixes
	# Collision between value-based suffixes: use indices to stay unique
	return ["dens_%i" % (j + 1) for j in range(0, len(densval))]
+
def get_dos_temperature(params, opts):
	"""Extract broadening temperature from opts; fall back to params.temperature.

	Argument 'opts' is a dict; the key 'tempbroadening' takes precedence when
	present and not None, otherwise the temperature stored on the PhysParams
	instance 'params' is used.
	"""
	tbroad = opts.get('tempbroadening')
	if tbroad is not None:
		return tbroad
	return params.temperature
+
def get_dos_quantity_and_unit():
	"""Read the DOS quantity and unit from the configuration.

	Returns a 2-tuple (quantity, unit); quantity is validated against the
	allowed choices for 'dos_quantity', unit against those for 'dos_unit'.
	"""
	quantity_choices = ['k', 's', 'p', 'e', 'momentum', 'states', 'dos', 'n', 'particles', 'carriers', 'cardens', 'charge']
	unit_choices = ['nm', 'cm', 'm']
	quantity = get_config('dos_quantity', choices = quantity_choices)
	unit = get_config('dos_unit', choices = unit_choices)
	return quantity, unit
+
+### TRANSITIONS ###
def transitions(
	params, data, erange, outputid, opts, plotopts, fig_bdep = None,
	ee_at_idos = None):
	"""Provide plots and csv tables for optical transitions.

	Arguments:
	params      PhysParams instance
	data        DiagData instance with DiagDataPoints with non-trivial
	            TransitionsData member (ddp.transitions is not None)
	erange      None or 2-tuple. If set, do not plot/write transitions outside
	            of this energy range.
	outputid    String that is inserted into the output file names
	opts        A dict instance with calculation options; the keys
	            'tempbroadening', 'broadening_dep', and 'cardensrange' are
	            read here (and the dict is passed on to opts_to_broadening()).
	plotopts    A dict instance with plot options
	fig_bdep    A reference (integer, matplotlib figure instance) to the figure
	            where the magnetic-field dependence is plotted. If None, do not
	            insert transitions into the magnetic-field dependence figure.
	ee_at_idos  Array containing energies at constant density. This must be an
	            array of shape (m, n), one row per density value. If None, do
	            not provide plot and table for filtered transitions.

	Output:
	All transitions (plot and table)
	(If ee_at_idos is set, also:)
	Filtered transitions (plot and table)
	Total absorption spectrum (plot and table)
	Delta absorption spectrum (plot and table)
	(If ee_at_idos is set and fig_bdep is set, also:)
	Transitions visualized in the magnetic field dependence

	No return value.
	"""
	cmap_trans = get_config('color_trans')
	do_all_if_filtered = get_config_bool('transitions_all_if_filtered')

	# Broadening
	temperature = get_dos_temperature(params, opts)
	broadening = opts_to_broadening(opts, ll = True, default = {'thermal': temperature})
	# B values; this should usually yield a VectorGrid instance
	bval = data.get_paramval()
	# Array of floats for tableo.simple2d because this function does not handle
	# Vector input correctly
	bval1 = bval.get_values('b') if isinstance(bval, VectorGrid) else bval
	if broadening is not None:
		broadening.apply_width_dependence(bval1, opts['broadening_dep'], in_place = True)
	if 'verbose' in sys.argv:
		print('transitions:', broadening)

	# Set the refractive index of the well layer, if present, on all
	# transitions data
	if 'well' in params.layerstack.names:
		lwell = params.layerstack.names.index('well')
		diel_epsilon = params.layerstack.mparam_layer(lwell)['diel_epsilon']
		refractive_index = np.sqrt(diel_epsilon)
		for d in data:
			if d is not None and d.transitions is not None:
				d.transitions.set_refractive_index(refractive_index)

	if ee_at_idos is None or do_all_if_filtered:
		# Provide all transitions only if carrier density is not set or if the
		# configuration value 'transitions_all_if_filtered' is set to true.
		ploto.transitions(
			data, filename = "transitions-all%s.pdf" % outputid, showplot = False,
			colormap = cmap_trans, **plotopts)
		tableo.transitions("transitions-all%s.csv" % outputid, data)

	if isinstance(ee_at_idos, np.ndarray) and ee_at_idos.ndim == 2:
		# One output file (set) per density value; choose unique file suffixes
		if ee_at_idos.shape[0] == 1:
			file_suffix = [""]
		elif 'cardensrange' in opts and len(opts['cardensrange']) == ee_at_idos.shape[0]:
			file_suffix = ["-%s" % s for s in filename_suffix_from_densval(opts['cardensrange'])]
		else:
			file_suffix = ["-%i" % (j + 1) for j in range(0, ee_at_idos.shape[0])]

		outputid_dens = [outputid + file_suffix[densidx] for densidx in range(ee_at_idos.shape[0])]
		f_args = (params, data, erange, opts, plotopts, fig_bdep, ee_at_idos.shape[0] > 1)
		parallel_apply(
			transitions_filter_worker, list(zip(outputid_dens, ee_at_idos)),
			f_args=f_args, num_processes=cmdargs.cpus()[0])
	return
+
+def transitions_filter_worker(density, params, data, erange, opts, plotopts, fig_bdep = None, reset_fig_bdep = False):
+	"""
+	Worker helper function for parallel transitions filtering and plotting.
+	Arguments:
+	density         Tuple containing the per density arguments outputid_dens and
+	                energies list.
+	reset_fig_bdep  Flags whether bdependence figure has to be reset. This is
+	                usually the case as soon as there is more than one density
+	                value / plot.
+	Further arguments: See transitions().
+
+	No return value.
+
+	Development note: This is a separate function instead of an internal function
+	of transitions() to avoid confusion and issues with pickling for parallel
+	processes, caused by shadowed or reused variable names.
+	"""
+	outputid_dens, energies = density
+	cmap_trans = get_config('color_trans')
+	cmap_idos = get_config('color_idos')
+	xrange = plotopts.get('xrange')
+	
+	# Broadening
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, ll = True, default = {'thermal': temperature})
+	# B values; this should usually yield a VectorGrid instance
+	bval = data.get_paramval()
+	# Array of floats for tableo.simple2d because this function does not handle
+	# Vector input correctly
+	bval1 = bval.get_values('b') if isinstance(bval, VectorGrid) else bval
+	if broadening is not None:
+		broadening.apply_width_dependence(bval1, opts['broadening_dep'], in_place = True)
+	if 'verbose' in sys.argv:
+		print('transitions_worker:', broadening)
+
+	if 'well' in params.layerstack.names:
+		lwell = params.layerstack.names.index('well')
+		diel_epsilon = params.layerstack.mparam_layer(lwell)['diel_epsilon']
+		refractive_index = np.sqrt(diel_epsilon)
+
+	# Filter transition at constant density
+	transitions_ampmin = opts['transitions'] if isinstance(opts['transitions'], float) else None
+	filtereddata = data.filter_transitions(energies, broadening=broadening, ampmin=transitions_ampmin)
+
+	# Plot filtered transitions
+	emax, eres = ploto.get_transitions_deltaemax(data)
+	if get_config_bool('transitions_plot'):
+		ploto.transitions(
+			filtereddata, filename="transitions-filtered%s.pdf" % outputid_dens,
+			showplot=False, colormap=cmap_trans, deltaemax=emax, **plotopts
+		)
+	tableo.transitions("transitions-filtered%s.csv" % outputid_dens, filtereddata)
+
+	if fig_bdep is not None:
+		maxnum = get_config_int('transitions_dispersion_num', minval=0)
+		if maxnum == 0:
+			maxnum = None
+		# Reset and redraw figure for subsequent density values
+		# This is rather slow, but unavoidable, since one cannot return
+		# to the original bdependence figure once the transitions have
+		# been drawn.
+		if reset_fig_bdep:
+			fig_bdep_trans = ploto.bands_1d(data, filename="bdependence-transitions%s.pdf" % outputid_dens,
+			                                showplot=False, erange=erange, **plotopts)
+			ploto.add_curves(bval, energies, fig=fig_bdep_trans,
+			                 filename="bdependence-transitions%s.pdf" % outputid_dens, linewidth=2.0)
+		else:
+			fig_bdep_trans = fig_bdep
+		ploto.add_transitions(filtereddata, fig=fig_bdep_trans, maxnum=maxnum,
+		                      filename="bdependence-transitions%s.pdf" % outputid_dens)
+
+	if get_config_bool('transitions_spectra'):  # check spectra output suppression
+		# Plot of (total) absorption and delta absorption (legacy)
+		e_spec = np.arange(1e-3, emax + 0.1 * eres, eres)  # use values from above
+		broadening_type = get_config('transitions_broadening_type',
+		                             choices=['step', 'delta', 'gauss', 'gaussian', 'fermi', 'thermal', 'lorentz',
+		                                      'lorentzian']).lower()
+		broadening_scale = get_config_num('transitions_broadening_scale', minval=0)
+		zmax = get_config_num('plot_transitions_max_absorption', minval=0)
+		frequency_ticks = get_config_bool('plot_transitions_frequency_ticks')
+		precision = tableo.get_precision('table_absorption_precision')
+
+		abs_spec = np.array(
+			[d.transitions.absorption_spectrum(e_spec, 'both', broadening_type, broadening_scale) for d in
+			 filtereddata])
+		abs_spec_delta = np.array(
+			[d.transitions.absorption_spectrum(e_spec, 'delta', broadening_type, broadening_scale) for d in
+			 filtereddata])
+		blabel = ploto.format_axis_label("$B$", r"$\mathrm{T}$")
+		delabel = ploto.format_axis_label(r"$\Delta E$", r"$\mathrm{meV}$")
+		# Absorption spectrum
+		ploto.density2d(
+			params, bval, e_spec, abs_spec, filename="absorption-spectrum%s.pdf" % outputid_dens,
+			energies=None, interpolate=False, xlabel=blabel, ylabel=delabel,
+			colormap=cmap_trans, legend=r"$A$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, zmax], zunit=False,
+			frequency_ticks=frequency_ticks)
+		tableo.simple2d(
+			"absorption-spectrum%s.csv" % outputid_dens, bval1, e_spec, abs_spec,
+			float_precision=precision, clabel='A(B, E)', axislabels=["B", "E"],
+			axisunits=["T", "meV"])
+		# Delta absorption spectrum
+		ploto.density2d(
+			params, bval, e_spec, abs_spec_delta,
+			filename="absorption-spectrum-delta%s.pdf" % outputid_dens,
+			energies=None, interpolate=False, xlabel=blabel, ylabel=delabel,
+			colormap=cmap_idos, posneg=True,
+			legend=r"$A_+-A_-$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[-zmax, zmax], zunit=False,
+			frequency_ticks=frequency_ticks)
+		tableo.simple2d(
+			"absorption-spectrum-delta%s.csv" % outputid_dens, bval1, e_spec,
+			abs_spec_delta, float_precision=precision,
+			clabel='(A+ - A-)(B, E)', axislabels=["B", "E"], axisunits=["T", "meV"])
+
+		# Calculate dielectric functions
+		d_well = params.layerstack.thicknesses_z[lwell]
+		eps_xx = np.array(
+			[d.transitions.dielectric_function(e_spec, d_well, 'xx', gamma=broadening_scale) for d in filtereddata])
+		eps_xy = np.array(
+			[d.transitions.dielectric_function(e_spec, d_well, 'xy', gamma=broadening_scale) for d in filtereddata])
+		eps_p = eps_xx + 1.0j * eps_xy
+		eps_m = eps_xx - 1.0j * eps_xy
+
+		# Calculate complex refractive indices
+		n_p = np.sqrt(eps_p)
+		n_m = np.sqrt(eps_m)
+		n_xx = 0.5 * (n_m + n_p)
+		n_xy = 0.5j * (n_m - n_p)
+
+		# Calculate polarimetry spectra:
+		# Faraday rotation angle
+		rot_spec = 0.5 * e_spec / hbar * np.real(n_p - n_m) * d_well / cLight
+		# Imaginary rotation angle: E_p/E_m = exp(theta)
+		ellip_spec = np.exp(0.5 * e_spec / hbar * np.imag(n_p - n_m) * d_well / cLight)
+		# Ellipticity angle: tan(ellip) = (E_p - E_m)/(E_p + E_m)
+		ellip_spec = np.arctan2(ellip_spec - 1, ellip_spec + 1)
+
+		# Alternative way using a constant refractive index, should be wrong,
+		# but might compare better to legacy absorption:
+		# Faraday rotation angle
+		rot_spec2 = 0.25 * e_spec / hbar * np.imag(eps_xy) * d_well / cLight / refractive_index
+		# Imaginary rotation angle: E_p/E_m = exp(theta)
+		ellip_spec2 = np.exp(0.25 * e_spec / hbar * np.real(eps_xy) * d_well / cLight / refractive_index)
+		# Ellipticity angle: tan(ellip) = (E_p - E_m)/(E_p + E_m)
+		ellip_spec2 = np.arctan2(ellip_spec2 - 1, ellip_spec2 + 1)
+
+		# Rotation spectra
+		ploto.density2d(
+			params, bval, e_spec, rot_spec,
+			filename="rotation-spectrum%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_idos,
+			legend=r"Rotation" if "legend" in sys.argv else False, posneg=True,
+			xrange=xrange, zrange=[-0.02, 0.02], zunit=False,
+			frequency_ticks=frequency_ticks)
+		tableo.simple2d(
+			"rotation-spectrum%s.csv" % outputid_dens, bval1, e_spec, rot_spec,
+			float_precision=precision, clabel='Rotation(B, E)',
+			axislabels=["B", "E"], axisunits=["T", "meV"])
+		ploto.density2d(
+			params, bval, e_spec, rot_spec2,
+			filename = "rotation-spectrum2%s.pdf" % outputid_dens, energies = None,
+			interpolate = False, xlabel = blabel, ylabel = delabel, colormap = cmap_idos,
+			legend = r"Rotation" if "legend" in sys.argv else False,
+			posneg = True, xrange=xrange, zrange = [-0.02, 0.02], zunit = False,
+			frequency_ticks = frequency_ticks)
+		# Elipticity spectra
+		ploto.density2d(
+			params, bval, e_spec, ellip_spec,
+			filename="ellipticity-spectrum%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_idos,
+			posneg=True, legend=r"Ellipticity" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[-0.02, 0.02], zunit=False,
+			frequency_ticks=frequency_ticks)
+		tableo.simple2d(
+			"ellipticity-spectrum%s.csv" % outputid_dens, bval1, e_spec, ellip_spec,
+			float_precision=precision, clabel='Ellipticity(B, E)',
+			axislabels=["B", "E"], axisunits=["T", "meV"])
+		ploto.density2d(
+			params, bval, e_spec, ellip_spec2,
+			filename="ellipticity-spectrum2%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_idos,
+			posneg=True, legend=r"Ellipticity" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[-0.02, 0.02], zunit=False,
+			frequency_ticks=frequency_ticks)
+		# Ref. index Re(n_p), Im(n_p)
+		ploto.density2d(
+			params, bval, e_spec, np.real(n_p),
+			filename="ref-index-n_p-real%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Re}(n_+)$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 50], zunit=False, frequency_ticks=frequency_ticks)
+		ploto.density2d(
+			params, bval, e_spec, np.imag(n_p),
+			filename="ref-index-n_p-imag%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Im}(n_+)$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 30], zunit=False, frequency_ticks=frequency_ticks)
+		# Ref. index Re(n_m), Im(n_m)
+		ploto.density2d(
+			params, bval, e_spec, np.real(n_m),
+			filename="ref-index-n_m-real%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Re}(n_-)$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 50], zunit=False, frequency_ticks=frequency_ticks)
+		ploto.density2d(
+			params, bval, e_spec, np.imag(n_m),
+			filename="ref-index-n_m-imag%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Im}(n_-)$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 30], zunit=False, frequency_ticks=frequency_ticks)
+		# Ref. index Re(n_xx), Im(n_xx)
+		ploto.density2d(
+			params, bval, e_spec, np.real(n_xx),
+			filename="ref-index-n_xx-real%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Re}(n_{xx})$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 50], zunit=False, frequency_ticks=frequency_ticks)
+		ploto.density2d(
+			params, bval, e_spec, np.imag(n_xx),
+			filename="ref-index-n_xx-imag%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Im}(n_{xx})$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 30], zunit=False, frequency_ticks=frequency_ticks)
+		# Ref. index Re(n_xy), Im(n_xy)
+		ploto.density2d(
+			params, bval, e_spec, np.real(n_xy),
+			filename="ref-index-n_xy-real%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Re}(n_{xy})$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 50], zunit=False, frequency_ticks=frequency_ticks)
+		ploto.density2d(
+			params, bval, e_spec, np.imag(n_xy),
+			filename="ref-index-n_xy-imag%s.pdf" % outputid_dens, energies=None,
+			interpolate=False, xlabel=blabel, ylabel=delabel, colormap=cmap_trans,
+			legend=r"$\mathrm{Im}(n_{xy})$" if "legend" in sys.argv else False,
+			xrange=xrange, zrange=[0, 30], zunit=False, frequency_ticks=frequency_ticks)
+
+
+### BERRY CURVATURE AND HALL CONDUCTIVITY ###
def berry_k(params, data, erange, outputid, opts, plotopts, idos = None):
	"""Provide plots of Berry curvature and Hall conductivity. Version for dispersions (momentum dependence).

	Arguments:
	params      PhysParams instance
	data        DiagData instance with DiagDataPoints containing 'berry' as an
	            observable.
	erange      None or 2-tuple. If set, do not plot Berry curvature for states
	            outside of this energy range.
	outputid    String that is inserted into the output file names
	opts        A dict instance with options
	plotopts    A dict instance with plot options
	idos        Integrated DOS as function of energy. If None, do not plot
	            integrated Berry curvature.

	Output:
	Berry curvature observable plot
	Berry curvature integrated observable plot
	(If idos is set)
	Berry curvature integrated observable plot as function of integrated DOS.

	No return value.
	"""
	# Band selection passed to the observable plot as 'which'; presumably
	# relative band indices -- TODO confirm against ploto.observable.
	berry_bands = (-6, 2)
	temperature = get_dos_temperature(params, opts)
	broadening = opts_to_broadening(opts, default = {'thermal': temperature})
	min_eres = get_min_eres()
	erange = erange_from_target_eres(erange, min_eres)
	# Momentum-dependent broadening is not supported here; warn unless the
	# dependence is an 'auto*' setting.
	if opts.get('broadening_dep') is not None and not opts.get('broadening_dep').startswith('auto'):
		sys.stderr.write("Warning (postprocess.berry_k): Broadening dependence on momentum is ignored.\n")

	ploto.observable(data, params, 'berry', which = berry_bands, filename = "berrycurvature%s.pdf" % outputid)
	# split_pm = True may yield a 2-tuple (contributions from + and - bands)
	# instead of a single integrated-observable object; both cases are handled
	# below.
	int_obs = integrated_observable(
		data, 'berry', erange, params, broadening = broadening, split_pm = True)
	if int_obs is None:
		sys.stderr.write("ERROR (postprocess.berry_k): Calculation of integrated observable 'berry' has failed.\n")
		return
	elif isinstance(int_obs, tuple) and len(int_obs) == 2:
		# Here, int_obs may be a tuple of two arrays: The contributions from
		# + and - bands. Division by 2 pi converts the integrated Berry
		# curvature to units of the conductance quantum e^2/h.
		berry_tot = [io.get_idos() / (2. * np.pi) for io in int_obs]
		ee = int_obs[0].ee
	else:
		berry_tot = int_obs.get_idos() / (2. * np.pi)
		ee = int_obs.ee

	# If observable 'berryiso' is available, then use an alternative manner to
	# determine the + and - contributions. The existing elements of int_obs are
	# added together to obtain total Berry curvature. Then the iso-Berry
	# curvature is extracted in the same manner from observable 'berryiso'. The
	# + and - contributions are then [(tot + iso) / 2, (tot - iso) / 2], put
	# together as new value of int_obs.
	# TODO: Not sure whether it is reliable in presence of a magnetic field.
	if 'berryiso' in data[0].obsids:
		if isinstance(berry_tot, list):  # sum berry_tot over the components (if any)
			berry_tot = np.sum(berry_tot, axis = 0)
		# Note: int_obs is rebound here; the 'berry' result is no longer needed.
		int_obs = integrated_observable(
			data, 'berryiso', erange, params, broadening = broadening,
			split_pm = True)
		if int_obs is None:
			sys.stderr.write("ERROR (postprocess.berry_k): Calculation of integrated observable 'berryiso' has failed.\n")
			return
		elif isinstance(int_obs, tuple) and len(int_obs) == 2:
			berry_iso = (int_obs[0].get_idos() + int_obs[1].get_idos()) / (2. * np.pi)
		else:
			berry_iso = int_obs.get_idos() / (2. * np.pi)
		berry = [(berry_tot + berry_iso) / 2, (berry_tot - berry_iso) / 2]
	else:
		berry = berry_tot

	# Integrated Berry curvature (Hall conductivity) vs energy ...
	axislabel = ploto.format_axis_label(r"$\sigma_\mathrm{H}$", "$e^2/h$")
	ploto.integrated_observable(
		params, ee, berry, filename = "berrycurvature-integrated%s.pdf" % outputid,
		xlabel = axislabel, orange = [-3.5, 3.5])

	# ... and, if the integrated DOS is available, also vs carrier density.
	if idos is not None:
		ploto.integrated_observable(
			params, ee, berry, filename = "berrycurvature-integrated-vs-n%s.pdf" % outputid,
			xlabel = axislabel, orange = [-3.5, 3.5], idos = idos)
	return
+
def berry_ll(params, data, erange, outputid, opts, plotopts):
	"""Provide plots and tables of Chern number / Berry curvature and Hall conductivity. Version for LL data.
	Note that Chern number is the Berry curvature integrated over momentum
	space. This integration is done implicitly when calculating these values.
	See berrycurv_ll() and berrycurv_ll_full() in berry.py.

	Arguments:
	params      PhysParams instance
	data        DiagData instance with DiagDataPoints containing 'chern' and
	            'chernsim' as observables.
	erange      None or 2-tuple. If set, do not plot Chern number / Berry
	            curvature for states outside of this energy range.
	outputid    String that is inserted into the output file names
	opts        A dict instance with options
	plotopts    A dict instance with plot options

	Output:
	Chern number / Berry curvature integrated observable plot
	Integrated Chern number / Berry curvature (Hall conductivity sigma) as
	    function of energy and magnetic field (plot and table)
	Local Chern number / Berry curvature (dsigma/dE; plot)
	Chern number / Berry curvature integrated observable plot in high resolution
	Integrated Chern number / Berry curvature (Hall conductivity sigma)
	    as function of integrated DOS and magnetic field (plot and table)
	Local Chern number / Berry curvature (dsigma/dn; plot and table)
	Hall conductivity at constant densities
	Hall Resistance (Rxy) at constant densities

	No return value.
	"""
	# Resolution targets and styling configuration
	min_xres = get_min_xres()
	min_eres = get_min_eres()
	erange = erange_from_target_eres(erange, min_eres)
	erange_hires = erange_from_target_eres(erange, 10 * min_eres)
	cmap_idos = get_config('color_idos')
	cmap_localdos = get_config('color_localdos')
	# Choose between the simulated and the directly calculated Chern observable
	berry_obs = 'chernsim' if get_config_bool('berry_ll_simulate') else 'chern'
	simul_str = '-simul' if get_config_bool('berry_ll_simulate') else ''
	precision = tableo.get_precision('table_berry_precision')
	label_style, unit_style = tableo.get_label_unit_style()
	# CSV column labels/units for sigma_H and dsigma_H/dn, per label style
	sigmah_csvlabel = {'none': None, 'false': None, 'raw': 'IntChern', 'plain': 'sigmaH', 'unicode': '\u03c3H', 'tex': r"$\sigma_\mathrm{H}$"}[label_style]
	dsdn_csvlabel = {'none': None, 'false': None, 'raw': 'dsigma/dn', 'plain': 'dsigmaH/dn', 'unicode': 'd\u03c3H/dn', 'tex': r"$d\sigma_\mathrm{H}/dn$"}[label_style]
	sigmah_unit = format_unit('e^2/h', style = unit_style, negexp = False)
	dsdn_unit = format_unit('1/T', style = unit_style, negexp = False)
	temperature = get_dos_temperature(params, opts)

	# Magnetic field values; bzval holds the z components as plain floats
	bs = data.get_paramval()
	bzval = np.asarray(data.get_paramval('z'))
	broadening, berry_broadening = opts_to_broadening(opts, berry = True, ll = True, default = {'thermal': temperature})
	# Apply B-dependent broadening widths in place
	if broadening is not None:
		broadening.apply_width_dependence(bzval, opts['broadening_dep'], in_place = True)
	if berry_broadening is not None:
		# Berry-specific broadening dependence falls back to the generic one
		berrybroadening_dep = opts.get('berrybroadening_dep')
		if berrybroadening_dep is None:
			berrybroadening_dep = opts.get('broadening_dep')
		berry_broadening.apply_width_dependence(bzval, berrybroadening_dep, in_place = True)
	if 'verbose' in sys.argv:
		print('berry_ll broadening:', broadening)
		print('berry_ll berry_broadening:', berry_broadening)

	# Axis labels used by several plots below
	b_plotlabel = ploto.format_axis_label("$B$", r"$\mathrm{T}$")
	sigmah_plotlabel = ploto.format_axis_label(r"$\sigma_\mathrm{H}$", "$e^2/h$")
	dsde_plotlabel = "$d\\sigma_\\mathrm{H}/dE$\n" + ploto.format_axis_unit("$e^2/h/\\mathrm{meV}$")

	sys.stderr.write("Calculating integrated observable (%s) ...\n" % berry_obs)
	int_obs = integrated_observable(
		data, berry_obs, erange, params, broadening = berry_broadening,
		local = True, min_res = min_xres)
	if int_obs is None:
		sys.stderr.write("ERROR (postprocess.berry_ll): Calculation of integrated observable has failed.\n")
		return
	bval, ee, iobs = int_obs.xyz_idos()
	# Extract Bz as plain floats for VectorGrid input
	bzval = bval.get_values('bz') if isinstance(bval, VectorGrid) else bval
	if iobs is None:
		sys.stderr.write("ERROR (postprocess.berry_ll): Integrated observable is not well-defined.\n")
		return

	legend = ploto.format_axis_label(r"$\sigma_\mathrm{H}$", "$e^2/h$") if "legend" in sys.argv else False
	# Integrated observable vs energy (per Bz value)
	ploto.integrated_observable(
		params, ee, iobs, filename = "%s-integrated%s.pdf" % (berry_obs, outputid),
		xlabel = sigmah_plotlabel, title = '$B_z = %.3g$ T', title_val = bzval,
		orange = [-7.0, 7.0])
	# Format Bz value the same as in ploto.toolstext.get_partext()
	# sigma_H as a function of (B, E): plot and table
	ploto.density2d(
		params, bval, ee, iobs, filename = "sigmah%s%s.pdf" % (simul_str, outputid),
		energies = None, interpolate = True, xlabel = b_plotlabel,
		colormap = cmap_idos, legend = legend, posneg = True, contours = True,
		zunit = False, xrange = plotopts.get('xrange'), zrange = [-7.0, 7.0])
	tableo.simple2d(
		"sigmah%s%s.csv" % (simul_str, outputid), bval, ee, iobs,
		float_precision = precision, clabel = 'IntChern(B, E)',
		axislabels = ["B", "E"], axisunits = ["T", "meV"],
		datalabel = sigmah_csvlabel, dataunit = sigmah_unit)
	dsde = int_obs.get_idos()  # d\sigma / dE
	legend = dsde_plotlabel if "legend" in sys.argv else False
	ploto.density2d(
		params, bval, ee, dsde, filename = "dsigmah-de%s%s.pdf" % (simul_str, outputid),
		energies = None, interpolate = True, xlabel = b_plotlabel,
		colormap = cmap_localdos, legend = legend, posneg = True, contours = False,
		zunit = False, xrange = plotopts.get('xrange'), zrange = [-3.0, 3.0])

	# Re-apply the width dependence on the original grid values before the
	# high-resolution pass. berrybroadening_dep is guaranteed to be bound here,
	# since it was set in the identical guard above.
	if berry_broadening is not None:
		berry_broadening.apply_width_dependence(bs.get_values('bz'), berrybroadening_dep, in_place = True)

	sys.stderr.write("Calculating integrated observable (%s) in high res ...\n" % berry_obs)
	# int_obs is rebound to the high-resolution result from here on
	int_obs = integrated_observable(
		data, berry_obs, erange_hires, params, broadening = berry_broadening,
		local = True, min_res = min_xres)
	bval, berry_ee, iobs = int_obs.xyz_idos()
	bzval = bval.get_values('bz') if isinstance(bval, VectorGrid) else bval

	# Density (integrated DOS) needed to express sigma_H as a function of n
	densitydata = integrated_dos_ll(
		data, erange_hires, params, broadening = broadening, min_res = min_xres)
	if densitydata is None:
		sys.stderr.write("ERROR (postprocess.berry_ll): Calculation of density has failed.\n")
		return
	lidos = densitydata.get_idos()
	lidos_last = densitydata.scaledvalues(lidos[-1])
	dens_qty, dens_unit = get_dos_quantity_and_unit()
	densitydata.set_scale(dens_qty, dens_unit)
	# Quantity and unit strings for the density axis (LaTeX, for plots)
	qstr = densitydata.qstr(style = 'tex', scaled = True)
	ustr = ploto.format_axis_unit(densitydata.unitstr(style = 'tex', scaled = True))
	denslabel = "%s %s" % (qstr, ustr)
	ploto.integrated_observable(
		params, berry_ee, iobs, filename = "%s-integrated-vs-n%s.pdf" % (berry_obs, outputid),
		xlabel = sigmah_plotlabel, ylabel = denslabel, title = '$B_z = %.3g$ T',
		title_val = bzval, orange = [-7.0, 7.0], idos = lidos_last)
	# Format Bz value the same as in ploto.toolstext.get_partext()

	# Calculate sigma_H as function of n
	# Default density grid in native units (1/nm^2); overridden by the
	# 'density_range' plot option if present.
	idos_val = np.arange(-0.002, 0.002001, 0.00001)
	densrange = plotopts.get('density_range')
	if densrange is not None:
		if densrange[0] is None:
			# Only a maximum given: use a symmetric range
			idos_val = np.arange(-densrange[1], densrange[1], 0.00001)
		else:
			idos_val = np.arange(densrange[0], densrange[1], 0.00001)
	# Re-express the integrated observable as a function of density
	int_obs_vs_n = int_obs.pushforward(densitydata, idos_val)
	dscale = DensityScale(idos_val, dens_qty, dens_unit, kdim = 2, ll = True)
	denslabel = "%s %s" % (dscale.qstr(style = 'tex'), ploto.format_axis_unit(dscale.unitstr(style = 'tex')))  # LaTeX (plot)
	dens_q, dens_u = dscale.qstr(style = label_style), dscale.unitstr(style = unit_style)  # apply styles (table)

	# Shallow copy of plotopts with the legend replaced by the sigma_H label
	plotopts1 = {}
	for po in plotopts:
		plotopts1[po] = plotopts[po]
	plotopts1['legend'] = sigmah_plotlabel if plotopts['legend'] is not False else False

	ploto.density2d(
		params, bval, dscale.scaledvalues(idos_val), int_obs_vs_n,
		filename = "sigmah%s-vs-n%s.pdf" % (simul_str, outputid), energies = None,
		interpolate = True, xlabel = b_plotlabel, ylabel = denslabel,
		yunit = True, zunit = False, colormap = cmap_idos, posneg = True,
		contours = True, zrange = [-7.0, 7.0], **plotopts1)

	# Reduce B values to plain floats for the table output. Note: this rebinds
	# bs (previously data.get_paramval()); presumably b.len() is the vector
	# magnitude -- TODO confirm against the Vector class.
	bs = np.array([b.len() for b in bval]) if isinstance(bval, VectorGrid) or len(bval) > 0 and isinstance(bval[0], Vector) else np.array(bval)
	# NOTE(review): The return value of the following call is discarded and the
	# same expression is evaluated again in tableo.simple2d below; this looks
	# like dead code -- confirm and remove.
	dscale.scaledvalues(idos_val)
	tableo.simple2d(
		"sigmah%s-vs-n%s.csv" % (simul_str, outputid), bs, dscale.scaledvalues(idos_val), int_obs_vs_n,
		float_precision = precision, clabel = 'IntChern(B, n)',
		axislabels = ["B", dens_q], axisunits = ["T", dens_u],
		datalabel = sigmah_csvlabel, dataunit = sigmah_unit)

	## Calculate dsigma / dn
	## In 'native' units, dsigma / dn is expressed in terms of (e^2/h) / (e/nm^2)
	## Simplifying: (e^2/h) / (e/nm^2) = (e/h) nm^2 = e/(2 pi hbar) nm^2
	## We can thus convert to T^-1 (inverse tesla) by multiplication by e/(2 pi hbar)
	dsdn = (eoverhbar / 2 / np.pi) * np.gradient(int_obs_vs_n, axis = 1) / np.gradient(idos_val)
	plotopts1['legend'] = "$d\\sigma_\\mathrm{H}/dn$ " + ploto.format_axis_unit("$\\mathrm{T}^{-1}$") if plotopts['legend'] is not False else False
	ploto.density2d(
		params, bval, dscale.scaledvalues(idos_val), dsdn,
		filename = "dsigmah-dn%s%s.pdf" % (simul_str, outputid), energies = None,
		interpolate = True, xlabel = b_plotlabel, ylabel = denslabel,
		colormap = cmap_localdos, posneg = False, contours = False,
		yunit = True, zunit = False, zrange = [-2.0, 8.0], **plotopts1)
	tableo.simple2d(
		"dsigmah-dn%s%s.csv" % (simul_str, outputid), bs, dscale.scaledvalues(idos_val), dsdn,
		float_precision = precision, clabel = 'dsigma/dn(B, n)',
		axislabels = ["B", dens_q], axisunits = ["T", dens_u],
		datalabel = dsdn_csvlabel, dataunit = dsdn_unit)

	# plot LDOS at constant DOS (densval)
	# Density values: from options if given, otherwise a default grid
	if 'cardensrange' in opts:
		densval = np.array(opts['cardensrange'])
	elif 'cardens' in opts:
		densval = np.array([opts['cardens']])
	else:
		densval = np.linspace(-0.015, 0.015, 31)
	densval = np.asarray(densval)
	int_obs_vs_n = int_obs.pushforward(densitydata, densval)
	# Restore the caller's legend setting for the constant-density plots
	plotopts1['legend'] = plotopts['legend']

	ploto.at_constant_dens_ll(
		bval, np.array(densval), int_obs_vs_n.T, "sigmah%s-constdens%s.pdf" % (simul_str, outputid),
		ylabel = sigmah_plotlabel, yrange = [-7.0, 7.0], is_ldos = False, **plotopts1)

	# plot Rxy (Hall resistance)
	with np.errstate(divide='ignore'):  # ignore 'division by zero' warning
		rxy = r_vonklitzing / int_obs_vs_n
		# Mask divergent values (near sigma_H = 0) so they are not plotted
		rxy[np.abs(rxy) > 2e5] = float("nan")

	# Hall 'slope': Classical Hall resistance Rxy = B / (n e)
	hall_slope = get_config_bool('plot_rxy_hall_slope')
	def hall_rxy(b, n):
		# Classical Hall resistance in kOhm; n in units of 1e18 m^-2 (= nm^-2)
		with np.errstate(divide='ignore', invalid='ignore'):  # ignore 'division by zero' and 'invalid value in true_divide' warnings
			return 1e-3 * b / (n * 1e18 * e_el)

	ploto.at_constant_dens_ll(
		bval, np.array(densval), rxy.T / 1e3, "rxy%s-constdens%s.pdf" % (simul_str, outputid),
		ylabel = ploto.format_axis_label(r"$R_{xy}$", r"$\mathrm{k}\Omega$"),
		yrange = [-50.0, 50.0], is_ldos = False,
		extra_function = hall_rxy if hall_slope else None, **plotopts1)
	return
+
+### (LOCAL) DENSITY OF STATES ###
def dos_k(
	params, data, erange, outputid, opts, plotopts, energies = None,
	onedim = False):
	"""Total density of states. Version for dispersions (momentum dependence).

	Arguments:
	params      PhysParams instance
	data        DiagData instance
	erange      None or 2-tuple. If set, do not plot Berry curvature for states
	            outside of this energy range.
	outputid    String that is inserted into the output file names
	opts        A dict instance with options
	plotopts    A dict instance with plot options
	energies    A dict instance with special energies, e.g., e0 and e_cnp.
	onedim      If True, indicate that we are considering a strip geometry. If
	            False, use the number of dimensions from params.kdim. This is
	            relevant for the units, for example.

	Output:
	DOS and integrated DOS as function of energy (plot and table)
	DOS as function of integrated DOS (plot)

	Returns:
	idos        Array with integrated density of states.
	energies    Updated energies dict (e.g., with chemical potential 'mu')
	"""
	# Mutable default avoided: create a fresh dict when none is given
	if energies is None:
		energies = {}
	e0 = energies.get('e0')
	min_eres = get_min_eres()
	erange = erange_from_target_eres(erange, min_eres)
	cardens = None if 'cardens' not in opts else opts['cardens']
	dens_qty, dens_unit = get_dos_quantity_and_unit()
	unit_negexp = get_config_bool('plot_dos_units_negexp')

	e_cnp = opts.get('e_cnp')
	n_offset = opts.get('n_offset')
	temperature = get_dos_temperature(params, opts)
	broadening = opts_to_broadening(opts, default = {'thermal': temperature})
	# Momentum-dependent broadening is not supported here; warn unless the
	# dependence is an 'auto*' setting.
	if opts.get('broadening_dep') is not None and not opts.get('broadening_dep').startswith('auto'):
		sys.stderr.write("Warning (postprocess.dos_k): Broadening dependence on momentum is ignored.\n")

	# Try to get zero energy from band indices if it is not given
	if e0 is None:
		d0 = data.get_zero_point()
		if d0 is not None and d0.bindex is not None:
			e0 = d0.get_eival0()

	sys.stderr.write("Calculating DOS...\n")
	densitydata = integrated_dos(data, erange, params, calculate_ef = True, radial = not onedim, broadening = broadening)
	if densitydata is None:
		sys.stderr.write("ERROR (postprocess.dos_k): Calculation of density has failed.\n")
		return None, None
	# Configure special energies, offsets, and scale on the density data
	densitydata.set_special_energies(e0 = e0)
	densitydata.offset(e_cnp = e_cnp, n_offset = n_offset)
	if cardens is not None:
		# Store the Fermi energy at the requested carrier density as 'ef'
		densitydata.energy_at_idos(cardens, save_as = 'ef')
	densitydata.set_scale(dens_qty, dens_unit)

	if broadening is not None and 'verbose' in sys.argv:
		broadening.print_verbose()

	# Output to stdout
	densitydata.print_special_energies(at_density = cardens, density_offset = n_offset)
	# Propagate the computed special energies back to the caller's dict
	energies.update(densitydata.get_special_energies())
	if get_config_bool('dos_print_validity_range'):
		print("IDOS/DOS validity range: %s" % densitydata.print_validity_range())

	# Plot
	ploto.dos_idos(params, densitydata, outputid = outputid, **plotopts)

	# Table file
	tableo.dos_idos(params, densitydata, outputid = outputid)

	## Plot dispersion as function of density
	idos = densitydata.get_idos()
	# The dispersion-vs-density plot requires a 1D momentum grid and a
	# well-defined IDOS; otherwise return early with the results so far.
	if len(data.shape) != 1:
		sys.stderr.write("Warning (postprocess.dos_k): Dispersion vs density plot is available only for dispersion along 1 dimension.\n")
		return idos, energies
	if idos is None:
		sys.stderr.write("Warning (postprocess.dos_k): Dispersion vs density plot is available only if IDOS is well-defined.\n")
		return idos, energies

	# Density range for the plot axis; a [None, max] input means symmetric
	density_range = plotopts.get('density_range')
	if isinstance(density_range, list) and len(density_range) == 2:
		densrange = [-density_range[1], density_range[1]] if density_range[0] is None else density_range
	else:
		densrange = None
	# Re-apply the scale with explicit plot limits, then get the scaled IDOS
	densitydata.set_scale(dens_qty, dens_unit, scaled_limits = densrange)
	idos = densitydata.get_idos(scaled = True)
	ee = densitydata.ee
	dscale = densitydata.get_scale()
	qstr = densitydata.qstr(style = 'tex', integrated = True, scaled = True)
	ustr = densitydata.unitstr(style = 'tex', integrated = True, scaled = True, negexp = unit_negexp)
	# Plot range priority: explicit densrange, else scale limits, else IDOS extent
	plotrange = (idos[0], idos[-1]) if densrange is None else densrange if dscale is None else (dscale.scaledmin, dscale.scaledmax)
	# Energy-to-density transformation used to relabel the energy axis
	etfm = ETransform(ee, idos, qstr = qstr, ustr = ustr, plotrange = plotrange)

	ploto.bands_1d(
		data, filename = "dispersion-vs-n%s.pdf" % outputid, showplot = False,
		erange = erange, energies = energies, transform = etfm, **plotopts)

	# Table output uses the configured label/unit styles instead of LaTeX
	label_style = get_config('table_dispersion_obs_style', ['raw', 'plain', 'unicode', 'tex'])
	unit_style = get_config('table_dispersion_unit_style', ['raw', 'plain', 'unicode', 'tex'])
	etfm.qstr = densitydata.qstr(style = label_style, integrated = True, scaled = True)
	etfm.ustr = densitydata.unitstr(style = unit_style, integrated = True, scaled = True, negexp = unit_negexp)
	tableo.disp_byband("dispersion-vs-n%s.csv" % outputid, data, params, erange = erange, transform = etfm)
	return idos, energies
+
+# Density of states (split by observable)
def dos_byobs(ll_or_k, params, data, obs, erange, outputid, opts, plotopts, **kwds):
	"""Density of states, split by observable value
	Wrapper around dos_k() or dos_ll(). For arguments, see the respective
	functions.

	Additional arguments:
	ll_or_k  String. Must be 'll' or 'k' ('momentum' is also acceptable). This
	         determines which DOS function should be used.
	obs      String. Observable whose values are grouped and selected.

	No return value
	"""
	# Select the underlying DOS function via a dispatch table
	dispatch = {'ll': dos_ll, 'k': dos_k, 'momentum': dos_k}
	mode = ll_or_k.lower()
	if mode not in dispatch:
		raise ValueError("Argument ll_or_k should be 'll' or 'k'/'momentum'.")
	dos_func = dispatch[mode]

	# Round observable values to this accuracy before grouping
	accuracy = 1e-3
	obsval = np.concatenate([ddp.get_observable(obs) for ddp in data.data])
	rounded = np.real(np.round(obsval / accuracy) * accuracy)
	values = np.unique(rounded)
	if len(values) > 4:
		sys.stderr.write('Warning (postprocess.dos_byobs): Too many different values for observable %s. Skip plot and output.\n' % str(obs))
		return
	# One DOS calculation and output set per distinct observable value
	for idx, val in enumerate(values, start = 1):
		outputid1 = outputid + ('-by%s-%i' % (str(obs), idx))
		print("Info (postprocess.dos_byobs): Files numbered %i (names '%s') is %s = %g." % (idx, outputid1, str(obs), val))
		selected = [ddp.select_obs(obs, val, accuracy = 0.5 * accuracy) for ddp in data]
		if any(len(ddp.eival) <= 2 for ddp in selected):
			sys.stderr.write('Warning (postprocess.dos_byobs): Not enough data for %s = %g. Skip plot and output.\n' % (str(obs), val))
			continue
		dos_func(params, DiagData(selected, grid = data.grid), erange, outputid1, opts, plotopts, **kwds)
	return
+
+# Density of states (LL mode)
+def dos_ll(params, data, erange, outputid, opts, plotopts, fig_bdep = None):
+	"""Total density of states. Version for LL data.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. If set, do not plot Berry curvature for states
+	            outside of this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with options
+	plotopts    A dict instance with plot options
+	fig_bdep    A reference (integer, matplotlib figure instance) to the figure
+	            where the magnetic-field dependence (Landau fan) is plotted. If
+	            set, draw equal-density curves into the Landau fan. If None, do
+	            not do so.
+
+	Output:
+	Equal-density curves in the Landau-fan plot; table with the energy
+	    dependence of these curves
+	Local density of states as function of B for constant density (plot)
+	Local density of states as function of 1/B for constant density (plot),
+	    i.e., a "Shubnikov-De Haas plot"
+	Local integrated density of states (as function of B, E; plot and table)
+	Numeric local integrated density of states (as function of B, E in units of
+	    e^2/h; plot and table)
+	Integrated observable and observable density (experimental)
+	Landau fan as function of density (integrated DOS; plot)
+
+	Returns:
+	ee_at_idos  Array with energies at predefined carrier density
+	"""
+	# Resolution targets, energy ranges, colour maps, broadening, and
+	# table/plot label styles from configuration and options.
+	min_xres = get_min_xres()
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	erange_hires = erange_from_target_eres(erange, 10 * min_eres)
+	cmap_idos = get_config('color_idos')
+	cmap_localdos = get_config('color_localdos')
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, ll = True, default = {'thermal': temperature})
+	precision = tableo.get_precision('table_dos_precision')
+	label_style, unit_style = tableo.get_label_unit_style()
+	label_idos = {'none': None, 'false': None, 'raw': 'IDOS', 'plain': 'n', 'unicode': 'n', 'tex': r"$n$"}[label_style]
+	label_ndos = label_idos
+	unit_idos = format_unit('1/nm^2', style = unit_style)  # NOTE(review): unit_idos appears unused in this function
+	unit_ndos = {'none': None, 'false': None, 'raw': '1', 'plain': '1', 'unicode': '1', 'tex': r"$1$"}[unit_style]
+	unit_negexp = get_config_bool('plot_dos_units_negexp')
+	magn_axislabel = ploto.format_axis_label("$B$", r"$\mathrm{T}$")
+	dens_qty, dens_unit = get_dos_quantity_and_unit()
+	bval = np.array(data.get_paramval())
+	bzval = np.array(data.get_paramval('z'))
+	if broadening is not None:
+		# Broadening width may depend on the B_z component (broadening_dep option)
+		broadening.apply_width_dependence(bzval, opts['broadening_dep'], in_place = True)
+	if 'verbose' in sys.argv:
+		print('dos_ll: broadening', broadening)
+		if broadening is not None:
+			broadening.print_verbose()
+
+	## Constant DOS contours
+	## Calculate LDOS if 'dos' command line argument is combined with 'localdos'
+	sys.stderr.write("Calculating DOS (iso-density contours)...\n")
+	# Line widths emphasize "round" density values: multiples of 0.01 are
+	# drawn heavier than multiples of 0.005, which are heavier than the rest.
+	if 'cardensrange' in opts:
+		densval = np.array(opts['cardensrange'])
+		linewidths = [2.0 if int(round(10000 * dens)) % 100 == 0 else 1.25 if int(round(10000 * dens)) % 50 == 0 else 0.75 for dens in densval]
+	elif 'cardens' in opts:
+		densval = np.array([opts['cardens']])
+		linewidths = 2.0  # NOTE(review): scalar here, list in the other branches; presumably both are accepted downstream — confirm
+	else:
+		densval = np.linspace(-0.015, 0.015, 31)
+		linewidths = [2.0 if int(round(10000 * dens)) % 100 == 0 else 1.25 if int(round(10000 * dens)) % 50 == 0 else 0.75 for dens in densval]
+
+	do_ldos = True  # TODO: Make optional?
+	densitydata = integrated_dos_ll(
+		data, erange, params, min_res = min_xres, broadening = broadening)
+	if densitydata is None:
+		sys.stderr.write("ERROR (postprocess.dos_ll): Calculation of density has failed.\n")
+		return None
+	_, ee_at_idos, ldos_at_idos = densitydata.energy_at_dos_ll(densval, do_ldos = do_ldos)
+
+	# add iso-DOS contour(s) to LL plot
+	if ee_at_idos is not None:
+		if fig_bdep is not None:
+			ploto.add_curves(bval, ee_at_idos, fig = fig_bdep, filename = "bdependence-density%s.pdf" % outputid, linewidth = linewidths)
+		tableo.energy_at_density("energy-at-density%s.csv" % outputid, bval, densval, ee_at_idos, float_precision = precision)
+
+	# plot LDOS at constant DOS
+	if ldos_at_idos is not None:
+		ploto.at_constant_dens_ll(
+			bval, densval, ldos_at_idos, "dos-constdens%s.pdf" % outputid,
+			is_ldos = True, **plotopts)
+		# Same data plotted versus 1/B ("Shubnikov-De Haas plot")
+		ploto.at_constant_dens_ll(
+			bval, densval, ldos_at_idos, "dos-constdens-sdh%s.pdf" % outputid,
+			is_ldos = True, reciprocal = True, **plotopts)
+
+	# Calculate integrated/total DOS
+	sys.stderr.write("Calculating DOS (total)...\n")
+	densitydata.set_scale(dens_qty, dens_unit)
+	bval, ee = densitydata.xval, densitydata.ee
+	# NOTE(review): this lidos value is never read before being reassigned in
+	# the high-resolution section below — dead assignment?
+	lidos = densitydata.get_idos()
+	fig_ldos = ploto.local_density(
+		params, densitydata, filename = "dos-total%s.pdf" % outputid,
+		interpolate = True, xlabel = magn_axislabel, colormap = cmap_idos,
+		posneg = True, contours = False, integrated = True, zunit = True, **plotopts)
+	# NOTE(review): bval1 computed here is unused; it is recomputed in the
+	# numeric-DOS section before its first use — dead assignment?
+	bval1 = bval.get_values('b') if isinstance(bval, VectorGrid) else bval
+	tableo.local_density(
+		params, densitydata, filename = "dos-total%s.csv" % outputid,
+		clabel = 'DOS({x}, E)', integrated = True)
+	if ee_at_idos is not None and fig_ldos is not None:
+		ploto.add_curves(bval, ee_at_idos, fig = fig_ldos, filename = "dos-total%s.pdf" % outputid, linewidth = linewidths)
+
+	# Calculate 'numeric' DOS
+	sys.stderr.write("Calculating DOS (numeric)...\n")
+	ndos = densitydata.get_numeric_dos_ll(method = 'division')
+	ploto.dos_ll(
+		params, bval, ee, ndos, outputid = outputid, energies = None,
+		interpolate = True, xlabel = magn_axislabel, colormap = cmap_idos,
+		contours = True, xrange = plotopts.get('xrange'), legend = "legend" in sys.argv)
+	# Extract plain numbers from a VectorGrid for the table output
+	bval1 = bval.get_values('b') if isinstance(bval, VectorGrid) else bval
+
+	tableo.simple2d(
+		"dos-numeric%s.csv" % outputid, bval1, ee, ndos,
+		float_precision = precision, clabel = 'NDOS(B, E)',
+		axislabels = ["B", "E"], axisunits = ["T", "meV"],
+		datalabel = label_ndos, dataunit = unit_ndos)
+
+	# Calculate integrated observable (experimental; TODO: test and improve)
+	obsid = plotopts.get('obs')
+	if obsid is None:
+		pass
+	elif obsid not in data[0].obsids or obsid not in all_observables:
+		sys.stderr.write("ERROR (postprocess.dos_ll): Requested integrated observable '%s' is not available.\n" % obsid)
+	else:
+		sys.stderr.write("Calculating integrated observable...\n")
+		int_obs = integrated_observable(
+			data, obsid, erange, params, broadening = broadening, local = True,
+			min_res = min_xres)
+		if int_obs is not None:
+			obs = all_observables[obsid]
+			qstr = obs.to_str(style = 'tex').strip('$').lstrip('$')  # TODO: dimful
+			plotlabel = "$\\mathcal{I}[%s]$\n" % qstr  # TODO: Unit. Is it even well-defined at all?
+			# Shallow copy of plotopts so that 'legend' can be overridden locally
+			plotopts1 = {}
+			for po in plotopts:
+				plotopts1[po] = plotopts[po]
+			# NOTE(review): direct indexing plotopts['legend'] raises KeyError if
+			# the key is absent — confirm plotopts always provides 'legend'.
+			plotopts1['legend'] = plotlabel if plotopts['legend'] is not False else False
+			ploto.density2d(
+				params, *int_obs.xyz_idos(),
+				filename = "int-obs-%s%s.pdf" % (obsid, outputid),
+				energies = None, interpolate = True, xlabel = magn_axislabel,
+				colormap = cmap_idos, posneg = True, contours = False,
+				integrated = True, ll = True, yunit = False, zunit = False, **plotopts1)
+			tableo.simple2d(
+				"int-obs-%s%s.csv" % (obsid, outputid), *int_obs.xyz_idos(),
+				float_precision = precision, clabel = 'int(%s)(B, E)' % obsid,
+				axislabels = ["B", "E"], axisunits = ["T", "meV"])
+
+			try:
+				cmapid = get_config('color_' + obs.colordata)
+			except:  # NOTE(review): bare except; any failure falls back to the local-DOS colormap
+				cmapid = cmap_localdos
+			zrange = obs.get_range()
+			plotlabel = ("$\\mathcal{D}[%s]$\n" % qstr)
+			plotopts1['legend'] = plotlabel if plotopts['legend'] is not False else False
+			# TODO: Unit
+			ploto.density2d(
+				params, *int_obs.xyz_dos(),
+				filename = "dens-obs-%s%s.pdf" % (obsid, outputid),
+				energies = None, interpolate = True, xlabel = magn_axislabel,
+				colormap = cmapid, posneg = False, contours = False,
+				integrated = True, ll = True, yunit = False, zunit = False,
+				zrange = zrange, **plotopts1)
+			tableo.simple2d(
+				"dens-obs-%s%s.csv" % (obsid, outputid), *int_obs.xyz_dos(),
+				float_precision = precision, clabel = 'dens(%s)(B, E)' % obsid,
+				axislabels = ["B", "E"], axisunits = ["T", "meV"])
+
+	# LL fan as function of density (do not interpolate in x direction!)
+	sys.stderr.write("Calculating DOS (for LL fan)...\n")
+	densitydata_hires = integrated_dos_ll(data, erange_hires, params, broadening = broadening)
+	if densitydata_hires is None:
+		sys.stderr.write("Warning (postprocess.dos_ll): High-resolution calculation of density has failed. Proceeding with low-resolution result.\n")
+		densitydata_hires = densitydata
+	bval, dos_ee = densitydata_hires.xval, densitydata_hires.ee
+	lidos = densitydata_hires.get_idos()
+	if lidos is None:
+		sys.stderr.write("ERROR (postprocess.dos_ll): Calculation of local density has failed.\n")
+		return ee_at_idos
+	# Horizontal-axis values: honour 'plotvar' if requested and valid
+	b1 = bval
+	if 'plotvar' in plotopts and plotopts['plotvar'] is not None:
+		try:
+			b1 = data.grid.get_values(plotopts['plotvar'])
+		except:
+			sys.stderr.write("Warning (postprocess.dos_ll): Invalid 'plotvar'. The plot will use the default variable instead.\n")
+	if isinstance(b1, VectorGrid):
+		b1, _, _, _ = b1.get_var_const()  # returns val, var, constval, const
+	elif all([isinstance(b, Vector) for b in b1]):
+		b1 = np.array([b.z() for b in b1])
+
+	# Determine the density range for the density axis of the LL fan
+	density_range = plotopts.get('density_range')
+	if density_range is None and 'cardensrange' in opts:
+		if len(densval) > 1:
+			# Requested densities plus a 10% margin on either side
+			span = densval.max() - densval.min()
+			densrange = np.array([densval.min() - 0.1 * span, densval.max() + 0.1 * span])
+		else:
+			densrange = np.sort(np.array([0, 2 * densval[0]]))
+	elif isinstance(density_range, list) and len(density_range) == 2:
+		# A single bound (first element None) yields a symmetric range
+		densrange = [-density_range[1], density_range[1]] if density_range[0] is None else density_range
+	else:
+		densrange = None
+	densitydata_hires.set_scale(dens_qty, dens_unit, scaled_limits = densrange)
+	idos = densitydata_hires.get_idos(scaled = True)
+	ee = densitydata_hires.ee
+	xval = densitydata_hires.xval
+	dscale = densitydata_hires.get_scale()
+	qstr = densitydata_hires.qstr(style = 'tex', integrated = True, scaled = True)
+	ustr = densitydata_hires.unitstr(style = 'tex', integrated = True, scaled = True, negexp = unit_negexp)
+	# Plot range: full IDOS range if no density range was set; otherwise the
+	# scaled limits from the density scale, or densrange if there is no scale.
+	plotrange = (idos.min(), idos.max()) if densrange is None else densrange if dscale is None else (dscale.scaledmin, dscale.scaledmax)
+	etfm = ETransform(ee, idos, qstr = qstr, ustr = ustr, plotrange = plotrange, xval = xval)
+
+	ploto.bands_1d(data, filename = "bdependence-vs-n%s.pdf" % outputid, showplot = False, erange = erange, transform = etfm, **plotopts)
+
+	# Table output uses the (possibly different) table label/unit styles
+	label_style = get_config('table_dispersion_obs_style', ['raw', 'plain', 'unicode', 'tex'])
+	unit_style = get_config('table_dispersion_unit_style', ['raw', 'plain', 'unicode', 'tex'])
+	etfm.qstr = densitydata_hires.qstr(style = label_style, integrated = True, scaled = True)
+	etfm.ustr = densitydata_hires.unitstr(style = unit_style, integrated = True, scaled = True, negexp = unit_negexp)
+	b = data.get_paramval()
+	tableo.disp_byband("bdependence-vs-n%s.csv" % outputid, data, params, erange = erange, transform = etfm, dependence = [b, "b", "T"])
+	return ee_at_idos
+
+## Density as function of z and energy
+def densityz(params, data, erange, outputid, opts, plotopts):
+	"""Total density of states of function of z and energy.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. Calculate densities within this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with options
+	plotopts    A dict instance with plot options
+
+	Output:
+	Multipage plot with density as function of z at the Fermi level, for all
+	    values of B.
+	Table with density as function of z and B, at the Fermi level.
+
+	No return value
+	"""
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, default = {'thermal': temperature})
+	precision = tableo.get_precision('table_dos_precision')
+	cmap_idos = get_config('color_idos')
+	cmap_localdos = get_config('color_localdos')
+	label_style, unit_style = tableo.get_label_unit_style()
+	dens_qty, dens_unit = get_dos_quantity_and_unit()
+
+	if opts.get('broadening_dep') is not None and not opts.get('broadening_dep').startswith('auto'):
+		sys.stderr.write("Warning (postprocess.densityz): Broadening dependence on momentum is ignored.\n")
+
+	sys.stderr.write("Density as function of z...\n")
+
+	# Get density as function of z and energy
+	densz = densityz_energy(
+		data, erange = erange, electrons = True, holes = True,
+		nz = params.nz, dz = params.zres, norb = params.norbitals,
+		broadening = broadening)
+	if densz is None:
+		sys.stderr.write("ERROR (postprocess.densityz): Density as function of z and E could not be obtained.\n")
+		return
+	densz = np.atleast_2d(densz)
+	zval = params.zvalues_nm()
+	ee = get_erange(erange)
+	
+	# Do scaling. Here, we note that the z coordinate makes the number of
+	# spatial dimensions one higher than set by params.kdim. Thus, we set
+	# kdim = 3 as argument to DensityScale. The multiplier for the size of the
+	# Brillouin zone is (2 pi)^2 [see densityz_energy()], but setting kdim = 3
+	# and dens_qty = 'k' uses the scaling multiplier (2 pi)^3, so in this case
+	# we compensate by a factor of 1 / 2 pi.
+	if dens_qty in ['k', 'momentum']:
+		densz /= 2 * np.pi
+	dscale = DensityScale(densz, dens_qty, dens_unit, kdim = 3)
+	ddensz = np.gradient(densz, axis=1) / np.gradient(ee)
+	
+	xlabel = ploto.format_axis_label("$z$", r"$\mathrm{nm}$")
+	fig_idos = ploto.densityz_energy(
+		params, zval, ee, densz, filename = "densz-energy-integrated%s.pdf" % outputid,
+		interpolate = True, xlabel = xlabel, colormap = cmap_idos,
+		posneg = True, contours = False, integrated = True, zunit = dscale,
+		**plotopts)
+	fig_dos = ploto.densityz_energy(
+		params, zval, ee, ddensz, filename = "densz-energy%s.pdf" % outputid,
+		interpolate = True, xlabel = xlabel, colormap = cmap_localdos,
+		posneg = False, contours = False, integrated = False, zunit = dscale,
+		**plotopts)
+	
+	idos_label = dscale.qstr(style = label_style, integrated = True)
+	idos_unit = dscale.unitstr(style = unit_style, integrated = True)
+	dos_label = dscale.qstr(style = label_style, integrated = False)
+	dos_unit = dscale.unitstr(style = unit_style, integrated = False)
+	tableo.simple2d(
+		"densz-energy-integrated%s.csv" % outputid, zval, ee,
+		dscale.scaledvalues(densz),
+		float_precision = (precision, 'g'), clabel = 'IDOS(z, E)',
+		axislabels = ["z", "E"], axisunits = ["nm", "meV"],
+		datalabel = idos_label, dataunit = idos_unit)
+	tableo.simple2d(
+		"densz-energy%s.csv" % outputid, zval, ee,
+		dscale.scaledvalues(ddensz),
+		float_precision = (precision, 'g'), clabel = 'DOS(z, E)',
+		axislabels = ["z", "E"], axisunits = ["nm", "meV"],
+		datalabel = dos_label, dataunit = dos_unit)
+
+	return
+
+## Density as function of z (experimental)
+def densityz_ll(params, data, erange, outputid, opts, plotopts, ll_full = False):
+	"""Total density of states. Version for LL data.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. Calculate densities within this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with options
+	plotopts    A dict instance with plot options
+	ll_full     True or False, whether using 'full' LL mode.
+
+	Output:
+	Multipage plot with density as function of z at the Fermi level, for all
+	    values of B.
+	Table with density as function of z and B, at the Fermi level.
+
+	No return value
+	"""
+	# NOTE(review): ll_full and plotopts are not referenced in this function body.
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, ll = True, default = {'thermal': temperature})
+
+	bzval = np.asarray(data.get_paramval('z'))
+	if broadening is not None:
+		# Broadening width may depend on the B_z component (broadening_dep option)
+		broadening.apply_width_dependence(bzval, opts['broadening_dep'], in_place = True)
+
+	## Constant DOS contours
+	## Calculate LDOS if 'dos' command line argument is combined with 'localdos'
+	sys.stderr.write("Density as function of z...\n")
+	if 'cardens' in opts:
+		densval = opts['cardens']
+	else:
+		sys.stderr.write("ERROR (postprocess.densityz_ll): Density as function of z takes a single carrier density only. Use option cardens with a single value.\n")
+		return
+	# NOTE(review): the warning below is reachable only if opts contains both
+	# 'cardens' and 'cardensrange'; presumably 'cardens' then holds the first
+	# value of the range — confirm upstream handling.
+	if 'cardensrange' in opts and isinstance(opts['cardensrange'], (list, np.ndarray)) and len(opts['cardensrange']) > 1:
+		sys.stderr.write("Warning (postprocess.densityz_ll): Density as function of z takes a single carrier density only. Only the first value is considered.\n")
+
+	# Calculate IDOS for ee_at_idos
+	densitydata = integrated_dos_ll(data, erange, params, broadening = broadening)
+	if densitydata is None:
+		sys.stderr.write("ERROR (postprocess.densityz_ll): Calculation of density has failed.\n")
+		return
+	# E(n) needs to be calculated with subdiv = 1
+	# NOTE(review): ldos_at_idos is not used below.
+	_, ee_at_idos, ldos_at_idos = densitydata.energy_at_dos_ll(densval, do_ldos = True, subdiv = 1)
+
+	# Note get_density_ll is imported using 'from .density import ... as ...' to avoid name clash
+	# Electron-only and hole-only densities as function of z, evaluated at the
+	# energies ee_at_idos[0] (presumably the curve belonging to the single
+	# requested carrier density — confirm).
+	densz_e = get_densityz_ll(
+		data, ee_at_idos[0], densitydata.ee, nz=params.nz, electrons=True, holes=False,
+		dz=params.zres, norb=params.norbitals, broadening=broadening)
+	densz_h = get_densityz_ll(
+		data, ee_at_idos[0], densitydata.ee, nz=params.nz, electrons=False, holes=True,
+		dz=params.zres, norb=params.norbitals, broadening=broadening)
+	densz = {'e': densz_e, 'h': densz_h}
+
+	ploto.densityz(
+		params, densz, filename = "densz%s.pdf" % outputid, legend = True,
+		title = '$B_z = %.3g$ T', title_val = bzval
+	)
+	# Format Bz value the same as in ploto.toolstext.get_partext()
+	tableo.densityz(
+		params, densz, "densz%s.csv" % outputid, xval = bzval, xlabel = "B_z",
+		xunit = "T"
+	)
+	return
+
+# Density of states (split by observable)
+def dos_ll_byobs(params, data, obs, erange, outputid, opts, plotopts, **kwds):
+	"""Density of states, split by observable value
+	Wrapper around dos_ll(). For arguments, see dos_ll.
+
+	Additional argument:
+	obs  String. Observable whose values are grouped and selected.
+
+	No return value
+	"""
+	accuracy = 1e-3
+	all_obsval = np.concatenate([ddp.get_observable(obs) for ddp in data.data])
+	obsval_round = np.round(all_obsval / accuracy) * accuracy
+	unique_values = np.unique(obsval_round)
+	if len(unique_values) > 4:
+		sys.stderr.write('Warning (postprocess.dos_ll_byobs): Too many different values for observable %s. Skip plot and output.\n' % str(obs))
+		return
+	for j, val in enumerate(unique_values):
+		outputid1 = outputid + ('-by%s-%i' % (str(obs), j))
+		datapoints_new = [ddp.select_obs(obs, val, accuracy = 0.5 * accuracy) for ddp in data]
+		if any([len(ddp.eival) <= 2 for ddp in datapoints_new]):
+			sys.stderr.write('Warning (postprocess.dos_ll_byobs): Not enough data for  %s = %g. Skip plot and output.\n' % (str(obs), val))
+			continue
+		data1 = DiagData(datapoints_new, grid = data.grid)
+		dos_ll(params, data1, erange, outputid1, opts, plotopts, **kwds)
+	return
+
+def localdos_k(params, data, erange, outputid, opts, plotopts, energies = None):
+	"""Plot local DOS as function of momentum.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. If set, do not plot Berry curvature for states
+	            outside of this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with options
+	plotopts    A dict instance with plot options
+	energies    A dict instance with special energies, e.g., e0 and e_cnp.
+
+	Output:
+	Local density of states as function of momentum and energy (plot)
+
+	No return value
+	"""
+	if energies is None:
+		energies = {}
+	e0 = energies.get('e0')
+	min_xres = get_min_xres()
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, default = {'thermal': temperature})
+	if opts.get('broadening_dep') is not None and not opts.get('broadening_dep').startswith('auto'):
+		sys.stderr.write("Warning (postprocess.localdos_k): Broadening dependence on momentum is ignored.\n")
+	dens_qty, dens_unit = get_dos_quantity_and_unit()
+
+	sys.stderr.write("Calculating local DOS...\n")
+	densitydata = local_integrated_dos(
+		data, erange, params, min_res = min_xres, broadening = broadening)
+	if densitydata is None:
+		sys.stderr.write("ERROR (postprocess.localdos_k): Calculation of density has failed.\n")
+		return
+
+	if len(energies) > 0:
+		densitydata.set_special_energies(**energies)
+
+	densitydata.set_scale(dens_qty, dens_unit)
+	hires = len(densitydata.xval) < 200  # minimal resolution: 200 points
+	ploto.local_density(
+		params, densitydata, integrated = False, outputid = outputid,
+		filename = "dos-local%s.pdf" % outputid, interpolate = True,
+		high_resolution = hires, colormap = get_config('color_localdos'),
+		**plotopts)
+	tableo.local_density(
+		params, densitydata, integrated = False,
+		clabel = 'LDOS({x}, E)', filename = "dos-local%s.csv" % outputid)
+	return
+
+def localdos_ll(params, data, erange, outputid, opts, plotopts):
+	"""Plot local DOS as function of magnetic field.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. If set, do not plot Berry curvature for states
+	            outside of this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with options
+	plotopts    A dict instance with plot options
+
+	Output:
+	Local density of states as function of magnetic field and energy (plot and
+	    table)
+	Differential local density of states (plot and table); this is the
+	    derivative of the previous quantity with respect to magnetic field. By
+	    virtue of the Streda formula, this is related to the Hall conductivity.
+
+	No return value.
+	"""
+	min_xres = get_min_xres()
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	cmap_localdos = get_config('color_localdos')
+	cmap_idos = get_config('color_idos')
+	precision = tableo.get_precision('table_dos_precision')
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, ll = True, default = {'thermal': temperature})
+	bzval = np.asarray(data.get_paramval('z'))
+	if broadening is not None:
+		broadening.apply_width_dependence(bzval, opts['broadening_dep'], in_place = True)
+	label_style, unit_style = tableo.get_label_unit_style()
+	label_ldos = {'none': None, 'false': None, 'raw': 'LDOS', 'plain': 'dn/dE', 'unicode': 'dn/dE', 'tex': r"$dn/dE$"}[label_style]
+	label_diffdos = {'none': None, 'false': None, 'raw': 'LDOS', 'plain': 'dn/dE', 'unicode': 'dn/dE', 'tex': r"$dn/dE$"}[label_style]
+	unit_ldos = format_unit('1/nm^2/meV', style = unit_style)
+	unit_diffdos = format_unit('e^2/h', style = unit_style, negexp = False)
+	magn_axislabel = ploto.format_axis_label("$B$", r"$\mathrm{T}$")
+	dens_qty, dens_unit = get_dos_quantity_and_unit()
+
+	## Calculate 'local' DOS
+	sys.stderr.write("Calculating DOS (local)...\n")
+	# Minimal resolution: 200 points
+	densitydata = integrated_dos_ll(
+		data, erange, params, broadening = broadening, min_res = min_xres)
+	if densitydata is None:
+		sys.stderr.write("ERROR (postprocess.localdos_ll): Calculation of density has failed.\n")
+		return
+
+	densitydata.set_scale(dens_qty, dens_unit)
+	bval, ee = densitydata.xval, densitydata.ee
+	zrange = plotopts.get('density_range')
+	hires = (len(bval) < 200)
+	ploto.local_density(
+		params, densitydata, outputid = outputid, interpolate = True,
+		high_resolution = hires, xlabel = magn_axislabel,
+		colormap = cmap_localdos, integrated = False,
+		yunit = False, zunit = True, zrange = zrange, **plotopts)
+	tableo.local_density(
+		params, densitydata, filename = "dos-local%s.csv" % outputid,
+		clabel = 'LDOS({x}, E)', integrated = False)
+
+	## Calculate 'differential' DOS
+	sys.stderr.write("Calculating DOS (differential)...\n")
+	ndos = densitydata.get_numeric_dos_ll(method = 'derivative')
+	legend = '$d\\mathrm{DOS}/dB$\n' + ploto.format_axis_unit('$e^2/h$') if "legend" in sys.argv else False
+	ploto.dos_ll(
+		params, bval, ee, ndos, filename = "dos-differential%s.pdf" % outputid,
+		energies = None, interpolate = True, xlabel = magn_axislabel,
+		xrange = plotopts.get('xrange'), colormap = cmap_idos, legend = legend)
+	tableo.simple2d(
+		"dos-differential%s.csv" % outputid, densitydata.xval, densitydata.ee, ndos,
+		float_precision = precision, clabel = 'dDOS/dB (B, E)',
+		axislabels = ["B", "E"], axisunits = ["T", "meV"],
+		datalabel = label_diffdos, dataunit = unit_diffdos)
+	return
+
+def banddos_k(params, data, erange, outputid, opts, plotopts, energies = None):
+	if energies is None:
+		energies = {}
+	e0 = energies.get('e0')
+	min_eres = get_min_eres()
+	erange = erange_from_target_eres(erange, min_eres)
+	temperature = get_dos_temperature(params, opts)
+	broadening = opts_to_broadening(opts, default = {'thermal': temperature})
+	if opts.get('broadening_dep') is not None and not opts.get('broadening_dep').startswith('auto'):
+		sys.stderr.write("Warning (postprocess.banddos_k): Broadening dependence on momentum is ignored.\n")
+
+	sys.stderr.write("Calculating DOS by band...\n")
+
+	densitydata = integrated_dos_by_band(data, erange, params, broadening = broadening)
+	if densitydata is None:
+		sys.stderr.write("ERROR (postprocess.banddos_k): Calculation of density has failed.\n")
+		return
+	elif not isinstance(densitydata, DensityDataByBand):
+		# Type check: Return type should be a dict with DensityData values.
+		raise TypeError("Invalid return value for integrated_dos()")
+	tableo.dos_byband(
+		"dos-byband%s.csv" % outputid, densitydata,	integrated = False,
+		showtotal = True)
+	tableo.dos_byband(
+		"dos-integrated-byband%s.csv" % outputid, densitydata, integrated = True,
+		showtotal = True)
+	return
+
+### BHZ/LOWDIN APPROXIMATION ###
+
+## BHZ calculation
+def bhz(params, data, erange, outputid, opts, plotopts, modelopts = {}):
+	"""BHZ approximation
+	Do the BHZ approximation (Löwdin partitioning) and provide a PDF with the
+	parameters and a visual comparison between the k.p and BHZ dispersions.
+
+	Arguments:
+	params      PhysParams instance
+	data        DiagData instance
+	erange      None or 2-tuple. If set, do not plot Berry curvature for states
+	            outside of this energy range.
+	outputid    String that is inserted into the output file names
+	opts        A dict instance with generic options
+	plotopts    A dict instance with plot options
+	modelopts   A dict instance with model options
+
+	Development note:
+	Default value for modelopts is not changed, hence safe.
+
+	No return value.
+	"""
+	# Band selection for the BHZ fit, taken from the command line
+	bhzarg = cmdargs.bhz()
+	k0_bhz = opts.get('k0_bhz', 0.0)
+	if abs(k0_bhz) > 1e-6:
+		sys.stderr.write("Warning: BHZ at nonzero momentum is an experimental feature. Use with care.\n")
+		k0_bhz = Vector(k0_bhz, 0.0, astype = 'xy')  # TODO: Only at x axis for the moment
+	split = opts.get('split', 0.0)
+	# NOTE(review): condition is split < 1e-4, so a negative split value also
+	# triggers this warning — possibly abs(split) was intended; confirm.
+	if split < 1e-4:
+		sys.stderr.write("Warning: For BHZ fitting, setting a nonzero split is highly recommended.\n")
+
+	# Model options for the BHZ run, seeded from defaults and overridden by opts
+	modelopts_bhz0 = {'energy': 0.0, 'neig': 50, 'lattice_reg': False, 'split': 0.0, 'ignorestrain': False, 'bia': False, 'axial': True, 'splittype': 'auto'}
+	mapping = {'targetenergy': 'energy'}
+	modelopts_bhz = cmdargs.initialize_opts(opts, modelopts_bhz0.copy(), mapping)
+	pot = modelopts.get('pot')
+	num_cpus = opts.get('cpu', 1)
+	if get_config_bool('lattice_regularization'):
+		sys.stderr.write("Warning (postprocess.bhz): Configuration option 'lattice_regularization=true' is ignored for BHZ calculation.\n")
+
+	bhz_basis, bhz_param, bhz_ham = do_bhz(
+		data, params, spin_obs = None, loc_obs = "wellext", par_obs = "isopz",
+		verbose = ("verbose" in sys.argv), angles = 6, bands_lower = bhzarg[0],
+		bands_a = bhzarg[1], bands_upper = bhzarg[2], num_cpus = num_cpus,
+		pot = pot, k0 = k0_bhz, **modelopts_bhz)
+	## Depending on the number of bands, the meaning of the output differs somewhat:
+	##   4 bands:             bhz_param = bhz parameters, bhz_ham = hamiltonian (symbolic)
+	## > 4 bands, multi dir:  bhz_param = [], bhz_ham = hamiltonian (symbolic)
+	## "0" bands:             an error has occurred
+	if len(bhz_basis) == 0:
+		sys.stderr.write("ERROR (postprocess.bhz): Perturbation theory has failed\n")
+		# NOTE(review): exit() is the site-builtins helper; sys.exit(1) is the
+		# more robust spelling in library code — confirm before changing.
+		exit(1)
+
+	## Plot of the resulting fit
+	# Determine k values, ... (use a finer grid than for the full Hamiltonian)
+	k_bhz_points = get_config_int("bhz_points", minval = 0)
+	if k_bhz_points is None:
+		k_bhz_subdiv = 10
+	elif k_bhz_points > len(data):
+		# Subdivide each interval so the BHZ grid has >= k_bhz_points points
+		k_bhz_subdiv = int(np.ceil(k_bhz_points / (len(data) - 1)))
+	else:
+		k_bhz_subdiv = 1
+
+	ks = data.get_momentum_grid()
+	_, k_comp, _, _ = ks.get_var_const()
+	if isinstance(k_comp, tuple):
+		sys.stderr.write("Warning (postprocess.bhz): Momentum grid subdivision only over first variable component\n")
+		k_comp = k_comp[0]
+	if k_comp.startswith('k'):
+		# Strip the leading 'k' (e.g. 'kx' -> 'x'); a bare 'k' maps to 'r'
+		k_comp = k_comp[1:] if len(k_comp) > 1 else 'r'
+	k_bhz = ks.subdivide(k_comp, k_bhz_subdiv) if k_bhz_subdiv > 1 else ks
+	# ... gather data, ...
+	bhzdata = []
+	for k in k_bhz:
+		# Shift momenta when the expansion point k0 is nonzero
+		k1 = k if k0_bhz == 0.0 else Vector(k.x() - k0_bhz.x(), k.y() - k0_bhz.y(), astype='xy')
+		ham = bhz_ham.evaluate(k1, 0)
+		eival, eivec = nplin.eigh(ham)
+		bhzdata.append(DiagDataPoint(k, eival, eivec))
+	bhzdata = DiagData(bhzdata, grid = k_bhz)
+	# ... and add plot.
+	filename = "dispersion%s.pdf" % outputid
+	if len(data.shape) == 1 and os.path.isfile(filename):
+		ploto.add_bhz(
+			bhzdata, filename=filename, showplot=False,
+			title=plotopts.get('title'), k0=k0_bhz
+		)
+
+	## LaTeX output of the BHZ Hamiltonian
+	if bhz_ham is not None:
+		includeplot = filename if os.path.isfile(filename) else None
+		tex_print_bhz_matrix(
+			"bhz%s.tex" % outputid, bhz_ham, basis=bhz_basis,
+			includeplot=includeplot, k0=k0_bhz
+		)
+	return
+
+### OTHER ###
+
+## Quantities as function of z
+def q_z(params, outputid, pot=None, legend=False):
+	"""Output quantities as function of z
+
+	Arguments:
+	params     PhysParams instance
+	outputid   String
+	pot        Numpy array or None. If set, the band edges plus the potential is
+	           plotted in addition to the band edges.
+	legend     True or False. Whether to include plot legends.
+
+	No return value
+	"""
+	# Ev + V, Ec + V plot
+	if isinstance(pot, np.ndarray) and pot.ndim == 1:
+		zval = np.arange(0, params.nz, dtype=float)
+		ev_pot = params.z(zval)['Ev'] + pot
+		ec_pot = params.z(zval)['Ec'] + pot
+		ploto.q_z(
+			params, np.array([ev_pot, ec_pot]), ylabel="V", yunit="meV",
+			legend=["$E_v$", "$E_c$"], filename="qz-bands-plus-pot%s.pdf" % outputid)
+
+	# Ev, Ec plot
+	ploto.q_z(params, ['Ev', 'Ec'], filename="qz-bands%s.pdf" % outputid, legend=legend)
+
+	# Luttinger parameters plot
+	ploto.q_z(
+		params, ['F', 'gamma1', 'gamma2', 'gamma3', 'kappa'],
+		filename="qz-fgammakappa%s.pdf" % outputid, legend=legend)
+
+	# Exchange parameters plot
+	ploto.q_z(
+		params, ['exch_yNalpha', 'exch_yNbeta'],
+		filename="qz-exchange%s.pdf" % outputid, legend=legend)
+
+	# Table
+	qty = ['Ev', 'Ec', 'F', 'gamma1', 'gamma2',	'gamma3', 'kappa', 'exch_yNalpha', 'exch_yNbeta']
+	units = ['meV'] * 2 + [''] * 5 + ['meV'] * 2
+	tableo.q_z("qz%s.csv" % outputid, params, qty, units=units)
+
+	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/potential.py b/kdotpy-v1.0.0/src/kdotpy/potential.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5a2d637ffda4fe9e2da0485f15dcc3015793561
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/potential.py
@@ -0,0 +1,1057 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
from os import environ
# Limit OpenMP to a single thread; set before the numpy import below so it is
# in effect when numpy initializes -- NOTE(review): confirm intended scope.
environ['OMP_NUM_THREADS'] = '1'
+import os
+import numpy as np
+import sys
+
+from .physconst import eovereps0
+from .potentialbc import eval_boundary_conditions
+from .integrate import integrate_arr, special_diff
+from .config import get_config
+
+### AUXILIARY FUNCTIONS ###
+
+
def vector_norm(arr, norm = 'max'):
	"""Apply array/vector norm according to given norm type

	Arguments:
	arr    Numpy array. Input values.
	norm   'max' or 'rms'. Maximum norm or root-mean-square norm.

	Returns:
	Float. The norm of arr.
	"""
	if norm == 'rms':
		return np.sqrt(np.mean(arr**2))
	if norm == 'max':
		return np.amax(np.abs(arr))
	raise ValueError("Invalid value for argument norm")
+
def symmetrized_distance(arr, center, norm = 'max'):
	"""Difference of array and its symmetrized version according to given norm type"""
	return vector_norm(symmetrize_array(arr, center) - arr, norm)
+
def antisymmetrized_distance(arr, center, norm = 'max'):
	"""Difference of array and its antisymmetrized version according to given norm type"""
	return vector_norm(antisymmetrize_array(arr, center) - arr, norm)
+
def isint(x):
	"""Test if value is integer-valued (scalar or numpy array)"""
	return x == np.round(x)
+
def symmetrize_array(arr, center = None):
	"""Symmetrize array around symmetry center.

	Mirror the array about the index center. The values on the shorter side of
	the center are replaced by the mirror image of the longer side; if the
	center coincides with the middle of the array, the result is the average
	of the array and its reversal.

	Arguments:
	arr      Numpy 1d-array. Array to be symmetrized.
	center   Integer, half-integer, or None. Index of symmetry center. If
	         None, use the middle of the array.

	Returns:
	symmetric_array   Numpy 1d-array. Symmetrized array of the same size.
	"""
	if arr.ndim != 1:
		raise ValueError("Argument arr must be a 1-dimensional array")
	size = len(arr)
	if center is None:
		center = (size - 1) / 2
	if not isint(2 * center):
		raise ValueError("Argument center must be an integer value or integer + 0.5")
	if not 0 <= center <= size - 1:
		raise ValueError(f"Argument center out of range for array of size {size}")

	mid = (size - 1) / 2
	# Start/end index of left and right parts that compose the result
	n_left = int(np.ceil(center))
	n_right = n_left + 1 if isint(center) else n_left

	if center < mid:
		# Mirror the right-hand part onto the left; keep the last size entries
		combined = np.concatenate((arr[n_left:][::-1], arr[n_right:]))
		return combined[-size:]
	if center > mid:
		# Mirror the left-hand part onto the right; keep the first size entries
		combined = np.concatenate((arr[:n_left], arr[:n_right][::-1]))
		return combined[:size]
	# Center coincides with the array middle: average with the reversal
	return 0.5 * (arr + arr[::-1])
+
+
def antisymmetrize_array(arr, center = None):
	"""Antisymmetrize array around (c, arr[c]) with c as symmetry center.

	The central value acts as an offset: the array is shifted so that the
	value at the center is zero, made antisymmetric about the center index,
	and the offset is added back at the end.

	Arguments:
	arr      Numpy 1d-array. Array to be antisymmetrized.
	center   Integer, half-integer, or None. Index of symmetry center. If
	         None, use the middle of the array.

	Returns:
	antisymmetric_array   Numpy 1d-array. Antisymmetrized array, same size.
	"""
	if arr.ndim != 1:
		raise ValueError("Argument arr must be a 1-dimensional array")
	size = len(arr)
	if center is None:
		center = (size - 1) / 2
	if not isint(2 * center):
		raise ValueError("Argument center must be an integer value or integer + 0.5")
	if not 0 <= center <= size - 1:
		raise ValueError(f"Argument center out of range for array of size {size}")

	mid = (size - 1) / 2
	# Start/end index of left and right parts that compose the result
	n_left = int(np.ceil(center))
	n_right = n_left + 1 if isint(center) else n_left

	# Central value (interpolated for half-integer centers) is the offset
	if isint(center):
		offset = arr[n_left]
	else:
		offset = (arr[n_left - 1] + arr[n_left]) / 2
	shifted = arr - offset

	if center < mid:
		# Negated mirror of the right-hand part replaces the left
		combined = np.concatenate((-shifted[n_left:][::-1], shifted[n_right:]))
		result = combined[-size:]
	elif center > mid:
		# Negated mirror of the left-hand part replaces the right
		combined = np.concatenate((shifted[:n_left], -shifted[:n_right][::-1]))
		result = combined[:size]
	else:
		result = 0.5 * (shifted - shifted[::-1])

	# Re-apply the offset
	return result + offset
+
+
def auto_symmetrize(arr, center, threshold = 1e-12, verbose = False):
	"""Symmetrize or antisymmetrize array if the result is sufficiently close to the original

	Arguments:
	arr         Numpy 1d-array. Array which will be checked for symmetry and
	            (anti-)symmetrized.
	center      Integer or float, being integer-valued or integer + 0.5. Index
	            of symmetry center.
	threshold   Float. Maximum distance between the (anti-)symmetrized array and
	            the original. If the actual distance is lower than threshold,
	            return the (anti-)symmetrized array, otherwise return the
	            original.
	verbose     True or False. If True, print what the function does.

	Returns:
	arr_new     Numpy 1d-array. (Anti-)symmetrized array or input array
                depending on threshold.
	"""
	if arr.ndim != 1:
		raise ValueError("Argument arr must be a 1-dimensional array")

	# Try symmetrization first, then antisymmetrization; fall back to the input
	if symmetrized_distance(arr, center) < threshold:
		action, arr_new = "Symmetrize", symmetrize_array(arr, center)
	elif antisymmetrized_distance(arr, center) < threshold:
		action, arr_new = "Antisymmetrize", antisymmetrize_array(arr, center)
	else:
		action, arr_new = "No symmetrization", arr
	if verbose:
		print(action)
		print("symmetry_before     =", symmetrized_distance(arr, center))
		print("symmetry_after      =", symmetrized_distance(arr_new, center))
		print("antisymmetry_before =", antisymmetrized_distance(arr, center))
		print("antisymmetry_after  =", antisymmetrized_distance(arr_new, center))
	return arr_new
+
+
def write_to_temp_file(filename, z, vz, new = False):
	"""Write potential to temporary file (for debugging)

	This is a best-effort debugging aid: if the file cannot be opened or
	written, the function returns silently.

	Arguments:
	filename  String. Filename in the current working directory
	z         List of axis values (e.g. z/growth direction)
	vz        List of values along the axis
	new       True or False. If True, write new file, overwriting if a file
	          with the same name already exists. If False, append to an existing
	          file.

	No return value
	"""
	try:
		# Context manager guarantees the file handle is closed even if a
		# write fails; the original bare 'except' around open() also hid
		# unrelated errors and could leak the handle.
		with open(filename, 'w' if new else 'a') as f:
			if new and z is not None:
				f.write(', '.join(["%s" % z1 for z1 in z]) + '\n')
			f.write(', '.join(["%s" % v1 for v1 in vz]) + '\n')
	except OSError:
		# Debug output only; silently skip if the file cannot be written
		return
+
+
def solve_potential(zval, densz, epsilonz, /, z1 = 0.0, z2 = 0.0, z3 = 0.0, v1 = None, v2 = None, v3 = None, v12 = None, dv1 = None, dv2 = None, dz = 1.0, verbose = False, well_center = None):
	"""Solve potential based on density and dielectric constant as function of z.
	That is, solve the potential V(z) from the Poisson equation
	   d_z [epsilon(z) d_z V(z)] = rho(z) e / epsilon0
	where epsilon(z) and rho(z) are the dielectric constant and charge density,
	respectively, as function of z. Here, d_z denotes the derivative in z.

	Note:
	The input array densz is modified in place: the density at the two edge
	points is moved onto the adjacent (second and second-to-last) points.

	Arguments:
	zval        Numpy array. The z coordinates where densz and epsilonz are
	            defined.
	densz       Numpy array. The density as function of z. Modified in place,
	            see Note above.
	epsilonz    Numpy array. The dielectric constant epsilon as function of z.
	z1, z2, z3  Integers or floats. Indices where boundary conditions act. They
	            must be unequal.
	v1, v2, v3  Floats. Potential values at z1, z2, and z3.
	v12         Float. Difference V(z2) - V(z1).
	dv1, dv2    Floats. Derivatives of the potential at z1 or z2.
	dz          Float. The resolution of the z coordinate in nm.
	verbose     True or False. If True, print diagnostic information to stdout.
	well_center Integer. Index of center of well layer.

	Returns:
	vz          Numpy array. The potential that solves the Poisson equation.
	"""
	# NOTE(review): assumes get_config restricts the value to the listed
	# choices; otherwise symmetrize/symmetry_center below would stay unbound.
	# Confirm against config.py.
	symmetrization_constraint = get_config('selfcon_symmetrization_constraint', choices = ['strict', 'loose', 'never'])

	if symmetrization_constraint == 'strict':
		# Symmetrize around the middle of the full z range
		symmetrize = True
		symmetry_center = (len(densz) - 1) / 2
	elif symmetrization_constraint == 'loose':
		# Symmetrize around the center of the well layer
		symmetrize = True
		symmetry_center = well_center
	elif symmetrization_constraint == 'never':
		symmetrize = False
		symmetry_center = None  # Won't be used

	# We need to keep the density at the edges equal to 0; otherwise, the
	# integrals may not always yield the expected values.
	densz[1] += densz[0]
	densz[0] = 0.0
	densz[-2] += densz[-1]
	densz[-1] = 0.0

	if symmetrize:
		# Symmetrize input arrays, but only if they are "sufficiently" symmetric already
		if verbose:
			print('densz:', end=' ')
		densz = auto_symmetrize(densz, symmetry_center, verbose=verbose)

	# First integral of the density over z
	int_densz = integrate_arr(densz) * dz
	if symmetrize:
		if verbose:
			print('intdensz:', end=' ')
		int_densz = auto_symmetrize(int_densz, symmetry_center, verbose=verbose)

	# Integral of 1/epsilon over z; enters the homogeneous part of the solution
	int_invepsilonz = integrate_arr(1. / epsilonz) * dz

	# Second integral: (integrated density) / epsilon, integrated over z
	int_dens_over_epsz = integrate_arr(int_densz / epsilonz) * dz
	if symmetrize:
		if verbose:
			print('int_dens_over_epsz:', end=' ')
		int_dens_over_epsz = auto_symmetrize(int_dens_over_epsz, symmetry_center, verbose=verbose)

	if verbose:
		print("solve_potential: Every 4th point from first and last z...")
		print("int dens:")
		print(int_densz[:40:4])
		print("...")
		print(int_densz[-40::4])
		print()
		print("int 1/eps")
		print(int_invepsilonz[:40:4])
		print("...")
		print(int_invepsilonz[-40::4])
		print()
		print("int n/eps")
		print(int_dens_over_epsz[:40:4])
		print("...")
		print(int_dens_over_epsz[-40::4])
		print()

	# Extract values from arrays: interpolate the integrals at the positions
	# where the boundary conditions are imposed
	int_dens_over_eps_z1, int_dens_over_eps_z2, int_dens_over_eps_z3 = \
		np.interp([z1, z2, z3], zval, int_dens_over_epsz)
	int_invepsilon_z1, int_invepsilon_z2, int_invepsilon_z3 = \
		np.interp([z1, z2, z3], zval, int_invepsilonz)
	epsilon_z1, epsilon_z2 = np.interp([z1, z2], zval, epsilonz)
	int_dens_z1, int_dens_z2 = np.interp([z1, z2], zval, int_densz)
	int_dens_over_epsz_1_2 = int_dens_over_eps_z2 - int_dens_over_eps_z1
	int_invepsilonz_1_2 = int_invepsilon_z2 - int_invepsilon_z1

	# Define solutions; each branch fixes the integration constant int_const
	# from a different pair of boundary conditions and anchors the potential at
	# the corresponding reference point.
	if v1 is not None and v2 is not None:
		# Potential values fixed at z1 and z2
		int_const = ((v2 - v1) - eovereps0 * int_dens_over_epsz_1_2) / int_invepsilonz_1_2
		vz = v1 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z1) + int_const * (int_invepsilonz - int_invepsilon_z1)

	elif v12 is not None and v3 is not None:
		# Potential difference between z1 and z2, absolute value fixed at z3
		int_const = (v12 - eovereps0 * int_dens_over_epsz_1_2) / int_invepsilonz_1_2
		vz = v3 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z3) + int_const * (int_invepsilonz - int_invepsilon_z3)

	elif v1 is not None and dv2 is not None:
		# Potential fixed at z1, derivative fixed at z2
		int_const = epsilon_z2 * dv2 - eovereps0 * int_dens_z2
		vz = v1 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z1) + int_const * (int_invepsilonz - int_invepsilon_z1)

	elif v2 is not None and dv1 is not None:
		# Potential fixed at z2, derivative fixed at z1
		int_const = epsilon_z1 * dv1 - eovereps0 * int_dens_z1
		vz = v2 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z2) + int_const * (int_invepsilonz - int_invepsilon_z2)

	elif v1 is not None and dv1 is not None:
		# Potential and derivative both fixed at z1
		int_const = epsilon_z1 * dv1 - eovereps0 * int_dens_z1
		vz = v1 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z1) + int_const * (int_invepsilonz - int_invepsilon_z1)

	elif v2 is not None and dv2 is not None:
		# Potential and derivative both fixed at z2
		int_const = epsilon_z2 * dv2 - eovereps0 * int_dens_z2
		vz = v2 + eovereps0 * (int_dens_over_epsz - int_dens_over_eps_z2) + int_const * (int_invepsilonz - int_invepsilon_z2)

	else:  # we should not end up here anyway
		raise ValueError("Invalid combination of boundary conditions")

	if symmetrize:
		if verbose:
			print('vz:', end=' ')
		# Looser threshold (1e-10) than the default 1e-12 used above
		vz = auto_symmetrize(vz, symmetry_center, threshold=1e-10, verbose=verbose)

	return vz
+
+
def init_potential(params, cardens = None, n_depletion = None, l_depletion = None, v_outer = None, v_inner = None, efield = None, verbose = False, custom_bc = None):
	"""Initial potential for the self-consistent calculations.

	Arguments:
	params         PhysParams instance.
	cardens        Float. Initial carrier density, assumed to be uniform over
	               the z direction.
	n_depletion    Float. Density in the depletion layer.
	l_depletion    Float. Thickness of the depletion layer in nm.
	v_inner, v_outer   V_{top} - V_{bottom}; electrostatic potential; use
		               v_outer to apply the potential to the edges of the stack,
		               v_inner to apply the potential to the interfaces of the
		               well
	efield         A list/tuple length 2, where both elements are float or None.
	               Electric field at bottom and top interface. None may be used
	               as shortcut for [None, None].
	verbose     True or False. If True, print diagnostic information to stdout.
	custom_bc   None or custom boundary conditions. If not None, passed to
	            bc.apply_custom() on top of the automatically determined
	            boundary conditions.

	Returns:
	cardens    Float. Carrier density consistent with the density function.
	qdensz     Numpy array. Charge density as function of z.
	qdensz_bg  Numpy array. Density of background charges as function of z.
	bc         Dict instance. The boundary conditions for the potential.
	"""
	nz = params.nz
	dz = params.zres
	zval = params.zvalues_nm()
	epsilonz = np.array([params.z(z)['diel_epsilon'] for z in range(0, nz)])   # this is quite inefficient but will do the job
	try:
		i_bottom, i_top = params.well_z(strict = True)
		z_bottom, z_top = params.well_z_nm(strict = True)
	except:
		# NOTE(review): bare except re-raises after the message, so no error is
		# swallowed; it does, however, also intercept KeyboardInterrupt etc.
		sys.stderr.write("ERROR (init_potential): The well layer could not be identified. This is necessary for the SC Hartree calculation to proceed.\n")
		raise
	z_mid = (z_bottom + z_top) / 2  # point centered in the well

	if efield is None:
		efield = [None, None]

	# initial density (zero everywhere)
	qdensz = np.zeros(nz, dtype = float)
	qdensz_bg = params.layerstack.get_density(np.arange(0, nz))

	# handle inner or outer potential: v_tb is the potential difference, vz1
	# and vz2 the positions (stack edges or well interfaces) where it acts
	if v_outer is None and v_inner is None:
		v_tb, vz1, vz2 = None, zval.min(), zval.max()
	elif v_outer is not None and v_inner is None:
		v_tb, vz1, vz2 = v_outer, zval.min(), zval.max()
	elif v_outer is None and v_inner is not None:
		v_tb, vz1, vz2 = v_inner, z_bottom, z_top
	else:
		raise ValueError("At most one of arguments v_outer and v_inner may be given.")

	# boundary conditions / initial potential
	if cardens is not None:
		# initial carrier density (uniformly distributed in well layer)
		for z in range(i_bottom, i_top + 1):
			qdensz[z] += -cardens / dz / ((i_top + 1) - i_bottom)
		if efield[0] is not None and efield[1] is not None:
			sys.stderr.write("ERROR (init_potential): Input of carrier density cannot be combined with two electric-field conditions.\n")
			exit(1)
	elif n_depletion is None and np.sum(np.abs(qdensz_bg)) < 1e-10 and efield[0] is not None and efield[1] is not None:
		# No density input and (almost) no background charge: derive the
		# carrier density from the difference of the electric fields at the
		# two stack edges, then distribute it uniformly over the well
		cardens = -(efield[1] * epsilonz[-1] - efield[0] * epsilonz[0]) / eovereps0
		for z in range(i_bottom, i_top + 1):
			qdensz[z] += -cardens / dz / ((i_top + 1) - i_bottom)
	if n_depletion is not None:
		if v_tb is not None and v_tb != 0.0:
			sys.stderr.write("ERROR (init_potential): Input of depletion layer charge and background (gate) potential cannot be combined.\n")
			exit(1)
		# Normalize depletion-layer thickness input: integers are interpreted
		# as numbers of lattice points (multiplied by dz), floats as nm
		if l_depletion is None:
			ldep_b, ldep_t = None, None
		elif isinstance(l_depletion, (float, np.floating)):
			ldep_b, ldep_t = l_depletion, l_depletion
		elif isinstance(l_depletion, (int, np.integer)):
			ldep_b, ldep_t = l_depletion * dz, l_depletion * dz
		elif isinstance(l_depletion, (list, tuple)) and len(l_depletion) == 2:
			ldep_b, ldep_t = tuple(l_depletion)
			if isinstance(ldep_b, int):
				ldep_b *= dz
			if isinstance(ldep_t, int):
				ldep_t *= dz
		else:
			sys.stderr.write("ERROR (init_potential): The depletion layer length argument must be a number or a list/tuple of two numbers.\n")
			exit(1)

		# Normalize depletion-layer charge input: a single number is split
		# equally over bottom and top
		if isinstance(n_depletion, (float, np.floating, int, np.integer)):
			ndep_b, ndep_t = 0.5 * n_depletion, 0.5 * n_depletion
		elif isinstance(n_depletion, (list, tuple)) and len(n_depletion) == 2:
			ndep_b, ndep_t = tuple(n_depletion)
		else:
			sys.stderr.write("ERROR (init_potential): The depletion layer charge argument must be a number or a list/tuple of two numbers.\n")
			exit(1)

		# Determine background charge
		if ldep_b is not None:
			# Spread the bottom depletion charge over nz_dep_b points below the well
			nz_dep_b = max(1, int(round(ldep_b / dz)))
			i_dep_b = max(0, i_bottom - nz_dep_b)
			if verbose:
				print("B depl:", i_bottom, ldep_b, nz_dep_b, i_dep_b)
			for z in range(i_dep_b, i_bottom):
				qdensz_bg[z] += ndep_b / dz / nz_dep_b
		if ldep_t is not None:
			# Spread the top depletion charge over nz_dep_t points above the well
			nz_dep_t = max(1, int(round(ldep_t / dz)))
			i_dep_t = min(nz, i_top + 1 + nz_dep_t)
			if verbose:
				print("T depl:", i_top + 1, ldep_t, nz_dep_t, i_dep_t)
			for z in range(i_top + 1, i_dep_t):
				qdensz_bg[z] += ndep_t / dz / nz_dep_t
	else:
		ndep_b, ndep_t = 0.0, 0.0

	# Total depletion charge and net charge of the full stack
	ndep = ndep_b + ndep_t
	net_charge = (np.sum(qdensz_bg) + np.sum(qdensz)) * dz

	bc = eval_boundary_conditions(
		params, net_charge = net_charge, cardens = cardens, efield = efield,
		v_tb = v_tb, vz1 = vz1, vz2 = vz2, n_depletion = n_depletion,
		ndep = ndep, ndep_b = ndep_b, ndep_t = ndep_t
	)
	if custom_bc is not None:
		bc = bc.apply_custom(custom_bc)

	bc = bc.validate(params)

	if verbose:
		print("Boundary conditions:", bc)
		print(zval.min(), z_bottom, z_mid, z_top, zval.max())

	return cardens, qdensz, qdensz_bg, bc
+
+
def solve_densityz(zval, vz, epsilonz, z1 = 0.0, z2 = 0.0, z3 = 0.0, v1 = None, v2 = None, v3 = None, v12 = None, dv1 = None, dv2 = None, dz = 1.0, symmetrize = True, verbose = False):
	"""Solve density based on potential and dielectric constant as function of z.
	That is, determine the density rho(z) from the Poisson equation
	   d_z [epsilon(z) d_z V(z)] = rho(z) e / epsilon0
	where epsilon(z) and V(z) are the dielectric constant and potential,
	respectively, as function of z. Here, d_z denotes the derivative in z.

	The arguments z1, z2, z3; v1, v2, v3; v12; dv1, dv2 determine the boundary
	conditions, see solve_potential() for more information. Here, only dv1 and
	dv2 are relevant. The other cases are handled automatically, provided there
	are no charges at the edges of the z range.

	Arguments:
	vz          Numpy array. The potential as function of z.
	epsilonz    Numpy array. The dielectric constant epsilon as function of z.
	z1, z2, z3  Integers. Determine the boundary conditions, see above.
	v1, v2, v3, v12, dv1, dv2
	            Floats. Determine the boundary conditions, see above.
	dz          Float. The resolution of the z coordinate in nm.
	symmetrize  NOT IMPLEMENTED
	verbose     NOT IMPLEMENTED

	Returns:
	densz       Numpy array. The density as found from the Poisson equation.
	"""
	# Lattice indices closest to the boundary-condition positions z1 and z2
	idx1 = np.argmin(np.abs(zval - z1))
	idx2 = np.argmin(np.abs(zval - z2))
	misaligned = np.abs(z1 - zval[idx1]) > 1e-6 or np.abs(z2 - zval[idx2]) > 1e-6
	if misaligned:
		sys.stderr.write("Warning (solve_densityz): Boundary conditions do not align with lattice in z direction. The result may be inaccurate.\n")

	# Derivative dV/dz; use special_diff as inverse of basic_integration
	if dv1 is not None:
		dvz = special_diff(vz, y0 = dv1, i0 = idx1) / dz
	elif dv2 is not None:
		dvz = special_diff(vz, y0 = dv2, i0 = idx2) / dz
	else:
		dvz = special_diff(vz, automatic = True) / dz
	# Density from the Poisson equation
	return special_diff(epsilonz * dvz, y0 = 0.0) / dz / eovereps0
+
def cardens_from_potential(vz, epsilonz, dz = 1.0):
	"""Get carrier density from potential V(z)

	The carrier density follows from the values of epsilon(z) V'(z) at the two
	ends of the z range.
	"""
	dvz = np.gradient(vz) / dz
	i_bot, i_top = 0, -1  # index range; customizable for debugging
	return (epsilonz[i_bot] * dvz[i_bot] - epsilonz[i_top] * dvz[i_top]) / eovereps0
+
+
+### FUNCTIONS FOR CALCULATING STATIC POTENTIALS ###
+
def gate_potential(v_tb, params, inner = False):
	"""Simulated gate potential

	Apply a voltage difference Vg between "top" and "bottom" of the layer stack.
	The boundary condition V(z0) = 0 fixes the value uniquely; here, z0 is the
	center of the well, or the center of the full layer stack if the well layer
	could not be identified.
	Small values can be used for lifting the degeneracy between top and bottom
	states.

	Arguments:
	v_tb     Float. The potential difference in meV between "top" and "bottom".
	params   PhysParams instance.
	inner    True or False. If True, apply the potential difference between top
	         and bottom surface of the well layer. If False, apply it between
	         top and bottom of the full layer stack.

	Returns:
	vz       Numpy array. The potential as function of z.
	"""
	nz = params.nz
	dz = params.zres
	zcoord = params.zvalues_nm()

	## Get array of dielectric constants
	epsilonz = np.array([params.z(z)['diel_epsilon'] for z in range(0, nz)])   # this is quite inefficient but will do the job
	zif1, zif2 = params.well_z_nm()
	well_found = zif1 is not None and zif2 is not None
	if well_found:
		zmid = (zif1 + zif2) / 2  # point centered in the well
	else:
		sys.stderr.write("Warning (gate_potential): The well layer could not be identified. The boundary condition for the potential will refer instead to the center of the full layer stack.\n")
		zmid = (zcoord.min() + zcoord.max()) / 2

	# Background density is zero everywhere
	densz_bg = np.zeros_like(zcoord)

	# Boundary conditions: difference v_tb between z1 and z2, zero at zmid
	z1, z2 = zcoord.min(), zcoord.max()
	if inner:
		if well_found:
			z1, z2 = zif1, zif2
		else:
			sys.stderr.write("Warning (gate_potential): The well layer could not be identified. The boundary condition for the potential will refer instead to the full layer stack.\n")
	bc = {'v12': v_tb, 'z1': z1, 'z2': z2, 'v3': 0.0, 'z3': zmid}

	return solve_potential(zcoord, densz_bg, epsilonz, dz = dz, **bc)
+
def gate_potential_from_opts(params, opts):
	"""Initialize gate potential from options

	Argument:
	params   PhysParams instance.
	opts     Dict instance. The options dict from the command line arguments.

	Returns:
	pot      Numpy array or None. Return an array if opts contains any of vgate,
	         vsurf, v_outer, v_inner, and the potential is not initialized from
	         a file. Otherwise return None.
	"""
	if 'potentialfile' in opts:
		return None
	if not any(arg in opts for arg in ['vgate', 'vsurf', 'v_outer', 'v_inner']):
		return None
	v_inner = False
	# Bug fix: this condition previously tested the key 'v_gate', which is
	# inconsistent with the key 'vgate' used everywhere else (including the
	# any() test above) and would raise a KeyError or silently skip the option.
	if 'vgate' in opts and opts['vgate'] is not None:
		vgate = opts['vgate']
	elif 'v_outer' in opts and opts['v_outer'] is not None:
		vgate = opts['v_outer']
	elif 'v_inner' in opts and opts['v_inner'] is not None:
		vgate = opts['v_inner']
		v_inner = True
	else:
		vgate = 0.0
	pot = gate_potential(vgate, params, inner = v_inner)
	# Optional surface/interface potential contribution
	if 'vsurf' in opts and opts['vsurf'] is not None and opts['vsurf'] != 0.0:
		pot += interface_potential(opts['vsurf'], opts['vsurf_l'], params, quadratic=opts['vsurf_quadratic'])
	return pot
+
def interface_potential(v_surf, vsurf_l, params, quadratic = False):
	"""Interface potential

	Potential that rises to v_surf at the interfaces/surfaces. It decreases
	towards 0 at a distance vsurf_l from the interface.

	Arguments:
	v_surf     Float. Potential value at the interface in meV.
	vsurf_l    Float. Length in nm in which the potential decreases to zero.
	params     PhysParams instance.
	quadratic  True or False. If True, then the potential decreases
	           quadratically to zero, i.e., the potential dependence is a
	           half-parabola at either side of the interface. If False, then the
	           decrease is linear.

	Returns:
	vz       Numpy array. The potential as function of z.
	"""
	nz = params.nz
	dz = params.zres

	zidx = np.arange(0, nz, dtype = float)
	# Signed distance of each lattice point to each interface, in nm
	dist = np.array([(zidx - zi) * dz for zi in params.zinterface])
	# Distance to the nearest interface, in units of vsurf_l
	d_if = np.amin(np.abs(dist), axis = 0) / vsurf_l
	ramp = np.maximum(1.0 - d_if, 0.0)
	return v_surf * (ramp**2 if quadratic else ramp)
+
def read_potential(params, *args, **kwds):
	"""A wrapper around read_potential_file() that takes care of iteration over multiple arguments"""
	if len(args) == 1:
		if isinstance(args[0], str):
			return read_potential_file(params, args[0], **kwds)
		if isinstance(args[0], (list, tuple)):
			# Expand a single list/tuple argument recursively
			return read_potential(params, *tuple(args[0]), **kwds)

	# Collect potentials and optional numeric multipliers; a number following
	# a file name acts as multiplier for that file
	potentials = []
	multipliers = []
	for arg in args:
		if isinstance(arg, str):
			potentials.append(read_potential_file(params, arg, **kwds))  # also fine if it is None
			multipliers.append(None)
		elif isinstance(arg, (float, int)):
			if not multipliers:
				sys.stderr.write("Warning (read_potential): Multiplier without valid potential file.\n")
			elif multipliers[-1] is None:
				multipliers[-1] = float(arg)
			else:
				sys.stderr.write("Warning (read_potential): Second and further multipliers are ignored.\n")

	# Sum all valid potentials, applying multipliers where given
	total = 0
	for pot, mult in zip(potentials, multipliers):
		if pot is None:
			sys.stderr.write("Warning (read_potential): Invalid potential file.\n")
			continue
		try:
			total += pot if mult is None else mult * pot
		except ValueError:
			sys.stderr.write("ERROR (read_potential): Potentials could not be combined.\n")
			return None
	if not isinstance(total, np.ndarray):
		sys.stderr.write("Warning (read_potential): No valid potential file.\n")
		return None
	return total
+
def potential_file_overwrite_warning(output_file, input_file, directory = None):
	"""Check if potential file will be overwritten and issue a warning if so.

	Arguments:
	output_file   String. The file name of the target file.
	input_file    String, list/tuple, or None. If a string, the file name of the
	              file from which the potential has been read, for which we
	              check if it will be overwritten. If a list or tuple, iterate
	              over the elements. If None, pass without doing anything.
	directory     String or None. If not None, the directory of the input file
	              (or files). If None, use the current working directory.

	No return value
	"""
	if not isinstance(output_file, str):
		raise TypeError("Argument output_file must be a string instance.")
	if isinstance(input_file, (list, tuple)):
		for fn in input_file:
			if isinstance(fn, str):
				potential_file_overwrite_warning(output_file, fn, directory = directory)
	elif isinstance(input_file, str):
		if directory is not None:
			input_file = os.path.join(directory, input_file)
		# Bug fix: os.path.samefile() raises an OSError if either path does
		# not exist; previously only output_file was checked, so a
		# nonexistent input_file crashed this warning helper.
		if os.path.exists(output_file) and os.path.exists(input_file) and os.path.samefile(output_file, input_file):
			sys.stderr.write("Warning (potential_file_overwrite_warning): The potential file \'%s\' is overwritten.\n" % output_file)
+
+def read_potential_file(params, filename, axis = 'z', directory = None, kbs = None):
+	"""Read potential from a file.
+
+	If the input coordinates to not match the coordinates determined by params,
+	then use interpolation and/or extrapolation. The result is a function of
+	the coordinates defined by params.
+
+	Arguments:
+	params      PhysParams instance.
+	filename    String. The file name of the input file.
+	axis        None, 'z', or 'y'. The axis for which the potential is read.
+	kbs         ZippedKB instance.
+
+	Returns:
+	pot   Numpy array of dimension 1, if the input is a simple potential as
+	      function of z or y, i.e., for an input file which has only two columns
+	      (coordinate and potential values).
+	      OR
+	      Numpy array of dimension 2, if the input file defines a potential
+	      split by orbital. The output shape is (nzy, 8) where nzy is the
+	      number of coordinates in z or y direction and 8 is the number of
+	      orbitals.
+	      OR
+	      A dict instance, if the input file defines a potential split by
+	      subbands. The dict keys are the subband labels, the values are arrays
+	      of dimension 1.
+	"""
+	if directory is not None:
+		filename = os.path.join(directory, filename)
+	try:
+		pf = open(filename, 'r')
+	except:
+		sys.stderr.write("ERROR (read_potential): File not found.\n")
+		exit(1)
+
+	l = pf.readline().strip()
+	cols = [c.strip().lstrip().lower() for c in l.split(',')]
+
+	if not (axis is None or axis in ['y', 'z']):
+		sys.stderr.write("Warning (read_potential): Potential must be along z or y axis.\n")
+		pf.close()
+		return None
+	if axis is None:
+		if 'z' in cols and 'y' not in cols:
+			axis = 'z'
+		elif 'y' in cols and 'z' not in cols:
+			axis = 'y'
+		else:
+			sys.stderr.write("Warning (read_potential): Wrong file format; the file needs to be a CSV file (with columns separated by \',\') with column headings \'z\' (or \'y\') and \'potential\'.\n")
+			pf.close()
+			return None
+
+	if cols[0] == "\"potential(b" and cols[1] == "z)\"" and axis == 'z':
+		print("Multi-B format detected.")
+		pf.close()
+
+		zval = params.zvalues_nm()
+		bzs = kbs.b.get_values('bz')
+		pot = np.genfromtxt(
+			filename, skip_header=1, skip_footer=2,	usecols=range(len(cols) - 3),
+			delimiter=",")
+
+		# Check whether the stored potential is for the same z and B values that the calculation uses
+		if len(pot[:,0]) == len(zval) and len(cols[2:-2]) == len(bzs):
+			if np.allclose(pot[:,0], zval) and np.allclose(np.array(cols[2:-2], dtype=np.float64), bzs):
+				return pot[:,1:].T
+
+		sys.stderr.write("Warning (read_potential): Interpolation not implemented for this file format.\n")
+		return None
+
+	if axis not in cols or not any([col.startswith('potential') for col in cols]):
+		sys.stderr.write("Warning (read_potential): Wrong file format; the file needs to be a CSV file (with columns separated by \',\') with column headings \'z\' (or \'y\') and \'potential\' (or \'potential*\'.\n")
+		pf.close()
+		return None
+
+	zcol = cols.index(axis)
+	pcol = [i for i, col in enumerate(cols) if col.startswith('potential')]
+
+	read_zval = []
+	read_pval = []
+
+	for l in pf:
+		ldata = [d.strip().lstrip() for d in l.split(',')]
+		try:
+			z = float(ldata[zcol])
+			p = [float(ldata[col]) for col in pcol]
+		except:
+			continue
+		read_zval.append(z)
+		read_pval.append(p)
+
+	pf.close()
+	if len(read_zval) < 2:
+		sys.stderr.write("Warning (read_potential): Insufficient or incorrectly formatted data.\n")
+		return None
+
+	read_zval = np.array(read_zval)
+	read_pval = np.array(read_pval)
+
+	if axis == 'z':
+		zval = params.zvalues_nm()
+	elif axis == 'y':
+		zval = params.yvalues_nm()
+	pval = np.array([np.interp(zval, read_zval, read_pval1) for read_pval1 in read_pval.transpose()])
+
+	read_dz_l = read_zval[1] - read_zval[0]
+	read_dz_r = read_zval[-1] - read_zval[-2]
+	read_dp_l = read_pval[1] - read_pval[0]
+	read_dp_r = read_pval[-1] - read_pval[-2]
+
+	read_zmin = read_zval.min()
+	read_zmax = read_zval.max()
+	for j in range(0, len(zval)):
+		if zval[j] < read_zmin:
+			pval[:, j] = read_pval[0] + read_dp_l * (zval[j] - read_zmin) / read_dz_l
+		if zval[j] > read_zmax:
+			pval[:, j] = read_pval[-1] + read_dp_r * (zval[j] - read_zmax) / read_dz_r
+	if zval.min() <= read_zmin - read_dz_l or zval.max() >= read_zmax + read_dz_r:
+		sys.stderr.write("Warning (read_potential): Extrapolation was needed.\n")
+
+	if any([cols[col].startswith('potentialsub') for col in pcol]):
+		if not all([cols[col].startswith('potentialsub') for col in pcol]):
+			sys.stderr.write("ERROR (read_potential): Potential in subband basis cannot be mixed with other types.\n")
+			return None
+		if axis != 'y':
+			sys.stderr.write("ERROR (read_potential): Potential in subband basis must contain data along y direction.\n")
+			return None
+		subbands_in_cols = [cols[col][12:].lstrip('_-').upper() for col in pcol]
+		subbands_pval = {}
+		for j, sb in enumerate(subbands_in_cols):
+			if len(sb) < 2:
+				sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+				continue
+			if sb[0] not in 'ELHS':
+				sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+				continue
+			if sb.endswith('+') or sb.endswith('-'):
+				try:
+					sbnum = int(sb[1:-1])
+				except:
+					sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+					continue
+				if sbnum <= 0:
+					sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+					continue
+				subbands_pval[sb] = pval[j]
+			else:
+				try:
+					sbnum = int(sb[1:])
+				except:
+					sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+					continue
+				if sbnum <= 0:
+					sys.stderr.write("Warning (read_potential): '%s' is not a valid subband identifier.\n" % sb)
+					continue
+				subbands_pval[sb] = pval[j]
+		return subbands_pval
+	if len(pcol) == 1 and cols[pcol[0]] == 'potential':
+		pval = pval.flatten()
+	else:
+		# pcols = [cols[col] for col in pcol]
+		pval_orb = np.zeros((8, pval.shape[1]), dtype = float)
+		for pcol_idx, col in enumerate(pcol):
+			colname = cols[col]
+			if colname == 'potential8':
+				for i in [2, 3, 4, 5]:
+					pval_orb[i] = pval[pcol_idx]
+			if colname == 'potential8h':
+				for i in [2, 5]:
+					pval_orb[i] = pval[pcol_idx]
+			if colname == 'potential8l':
+				for i in [3, 4]:
+					pval_orb[i] = pval[pcol_idx]
+			if colname == 'potential7':
+				for i in [6, 7]:
+					pval_orb[i] = pval[pcol_idx]
+			if colname == 'potential6':
+				for i in [0, 1]:
+					pval_orb[i] = pval[pcol_idx]
+		pval = pval_orb.transpose()
+	return pval
+
+def subband_potential(params, subbands_pot, overlap_vectors):
+	"""Expand subband potential dict to a potential landscape V(z, y) using the overlap vectors.
+
+	Given the (input) subband potential density v_i(y) where i is a subband
+	label, and the overlap eigenvectors psi_i(z), the resulting potential is
+	V(z, y) = sum_{i in subbands} |psi_i(z)|^2 v_i(y)
+	Note that the inputs v_i(y) have units meV / nm, so that the product with
+	the probability density |psi_i(z)|^2 yields a potential in meV.
+
+	Arguments:
+	params           PhysParams instance. Used to extract nz, ny, and norbitals.
+	subbands_pot     Dict instance, where the keys are the subband labels (see
+	                 note below) and the values are arrays of length ny; these
+	                 are the values v_i(y) in the equation above.
+	overlap_vectors  Dict instance, where the keys are the subband labels (see
+	                 note below) and the values are arrays of length
+	                 nz * norbitals; these are the eigenvectors psi(z) of the
+	                 system in a 2D geometry. We sum over the orbital degrees of
+	                 freedom.
+
+	Returns:
+	pot   An array of shape (nz, ny); the values V(z, y) in the equation above.
+
+	Notes:
+	The subband labels can be of the form H1, E1, etc. or H1+, H1-. We always
+	use uppercase labels. If for the subband potential H1 is given and the
+	overlap vectors give H1+ and H1-, the summation for V(z, y) will contain the
+	contribution
+	( |psi_{H1+}(z)|^2 + |psi_{H1-}(z)|^2 ) v_H1(y),
+	i.e., we sum over both H1+ and H1- in the same amounts.
+	TODO: Is this sensible? Or should we divide by 2?
+	"""
+	if not isinstance(subbands_pot, dict):
+		raise TypeError("Argument subbands_pot must be a dict instance.")
+	if overlap_vectors is None:
+		sys.stderr.write("ERROR (subband_potential): Overlap vectors are required for potential in subband basis.\n")
+		return None
+	elif not isinstance(overlap_vectors, dict):
+		raise TypeError("Argument overlap_vectors must be a dict instance or None.")
+
+	nz = params.nz
+	ny = params.ny
+	norb = params.norbitals
+	dz = params.zres
+
+	for subband in subbands_pot:
+		if subbands_pot[subband].shape != (ny,):
+			sys.stderr.write("ERROR (subband_potential): Potential data has invalid shape.\n")
+			return None
+	for ov in overlap_vectors:
+		if overlap_vectors[ov].shape != (nz * norb,):
+			sys.stderr.write("ERROR (subband_potential): Overlap vector has invalid shape.\n")
+			return None
+
+	pot = np.zeros((nz, ny), dtype = float)
+	for sb in subbands_pot:
+		if sb in overlap_vectors:
+			psiz2 = np.sum(np.reshape(np.abs(overlap_vectors[sb])**2, (nz, norb)), axis = 1)
+			pot += np.outer(psiz2 / dz, subbands_pot[sb])  # note factor 1 / dz, to get a proper probability density
+		else:
+			if sb + '+' in overlap_vectors:
+				psiz2 = np.sum(np.reshape(np.abs(overlap_vectors[sb + '+'])**2, (nz, norb)), axis = 1)
+				pot += np.outer(psiz2 / dz, subbands_pot[sb])
+			if sb + '-' in overlap_vectors:
+				psiz2 = np.sum(np.reshape(np.abs(overlap_vectors[sb + '-'])**2, (nz, norb)), axis = 1)
+				pot += np.outer(psiz2 / dz, subbands_pot[sb])
+			if sb + '+' not in overlap_vectors and sb + '-' not in overlap_vectors:
+				sys.stderr.write("Warning (subband_potential): Overlaps vector for subband '%s' is not defined.\n" % sb)
+	return pot
+
+def print_potential(params, pot):
+	"""Print potential and electric fields at interfaces and in the layers
+
+	The output is the electrostatic energy in mV and the electric field in
+	mV/nm. Note: The electrostatic potential is the opposite (sign-wise) of the
+	potential energy.
+
+	Arguments:
+	params    PhysParams instance.
+	pot       Numpy array. The potential energy in meV. A 1D array V(z), or a
+	          2D array; if the second axis has length nz, each row is printed
+	          as a separate 1D potential (recursively), otherwise the second
+	          axis is treated as the orbital degree of freedom.
+
+	No return value.
+	"""
+	if pot is None:
+		sys.stderr.write("Warning (print_potential): Potential is not defined.\n")
+		return
+	if isinstance(pot, dict):
+		sys.stderr.write("Warning (print_potential): Potential in terms of subbands cannot be shown.\n")
+		return
+	# 2D array with rows of length nz: print each row as its own potential
+	if isinstance(pot, np.ndarray) and pot.ndim == 2 and pot.shape[1] == params.nz:
+		for pot1 in pot:
+			print_potential(params, pot1)
+		return
+
+	nz = params.nz
+	dz = params.zres
+	# Interface positions (in lattice coordinates) and layer midpoints
+	zint = np.array(params.zinterface)
+	zlay = np.array([0.5 * (zint[j] + zint[j + 1]) for j in range(0, len(zint) - 1)])
+
+	# Interleave interfaces and layer midpoints: zint[0], zlay[0], zint[1], ...
+	zall = np.empty((zint.size + zlay.size,), dtype=float)
+	zall[0::2] = zint
+	zall[1::2] = zlay
+
+	# Convert lattice coordinates to nm, centered on the middle of the stack
+	zval = (zall - 0.5 * nz + 0.5) * dz
+
+	potval = []
+	efield = []
+	# For the signs of the potential and E field, see the note above.
+	for z in zall:
+		zi = int(round(z))
+		if zi == 0:
+			# Bottom edge: one-sided difference
+			potval.append(-pot[zi])
+			efield.append((pot[zi + 1] - pot[zi]) / dz)
+		elif zi == nz - 1:
+			# Top edge: one-sided difference
+			potval.append(-pot[zi])
+			efield.append((pot[zi] - pot[zi - 1]) / dz)
+		elif int(round(2 * z)) % 2 == 1:  # z is a half-integer
+			# Interface between two lattice points: average the potential,
+			# difference over a single lattice spacing
+			zi1, zi2 = int(round(z - 0.5)), int(round(z + 0.5))
+			potval.append(-0.5 * (pot[zi1] + pot[zi2]))
+			efield.append((pot[zi2] - pot[zi1]) / dz)
+		else:  # z is an integer
+			# Interior lattice point: centered difference
+			potval.append(-pot[zi])
+			efield.append(0.5 * (pot[zi + 1] - pot[zi - 1]) / dz)
+
+	vmax = np.amax(np.abs(pot))
+	emax = np.amax(np.abs(np.array(efield)))
+
+	# Choose display units (nV, µV, mV, V) from the magnitude of the values
+	if vmax < 1e-4:
+		vmult = 1e-6
+		vunit = "nV"
+	elif vmax < 1e-1:
+		vmult = 1e-3
+		vunit = "\u00b5V"
+	elif vmax < 1e2:
+		vmult = 1.0
+		vunit = "mV"
+	else:
+		vmult = 1e3
+		vunit = "V "
+
+	if emax < 1e-4:
+		emult = 1e-6
+		eunit = "nV/nm"
+	elif emax < 1e-1:
+		emult = 1e-3
+		eunit = "\u00b5V/nm"
+	elif emax < 1e2:
+		emult = 1.0
+		eunit = "mV/nm"
+	else:
+		emult = 1e3
+		eunit = "V/nm "
+
+	if pot.ndim == 1:
+		print("   z        V(z)      E(z)")
+		unitstr = "  [nm]      [%s]     [%s]" % (vunit, eunit)
+	else:
+		# Presumably shape (nz, norbitals) here; rows of length nz were
+		# already dispatched to the recursive branch above.
+		norb = pot.shape[1]
+		print("Split by orbital")
+		print(    "   z        V(z)" + " " * (8 * norb - 3) + "E(z)")
+		unitstr = "  [nm]      " + "    ".join(["[%s]" % vunit for _ in range(norb)]) + "  " + " ".join(["[%s]" % eunit for _ in range(norb)])
+	unicodewarn = False
+	try:
+		print(unitstr)
+	except UnicodeEncodeError:
+		# Terminal encoding cannot represent the µ sign; bypass the text layer
+		sys.stdout.buffer.write(unitstr.encode('utf-8') + b'\n')  # force unicode encoding
+		unicodewarn = True
+	for z, v, e in zip(zval, potval, efield):
+		if pot.ndim == 1:
+			print("%7.2f   %7.2f   %7.2f" % (z, v / vmult, e / emult))
+		else:
+			# v and e are arrays (one value per orbital) in this branch
+			s = ("%7.2f" % z) + "  "
+			s += " ".join(["%7.2f" % x for x in (v / vmult)]) + "  "
+			s += " ".join(["%7.2f" % x for x in (e / emult)])
+			print(s)
+	if unicodewarn:
+		sys.stderr.write("Warning (print_potential): Some symbols could not be encoded in the output encoding (%s) and were forcibly converted to UTF-8. You may try to use 'export PYTHONIOENCODING=utf8' to get rid of this warning.\n" % sys.stdout.encoding)
+	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/potentialbc.py b/kdotpy-v1.0.0/src/kdotpy/potentialbc.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c2958f8da6fadd4643411f22d3ac2131b8bec79
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/potentialbc.py
@@ -0,0 +1,317 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+### BOUNDARY CONDITION TOOLS ###
+
+def eval_boundary_conditions(
+		params, /, net_charge = 0.0, cardens = 0.0, efield = None, v_tb = None,
+		vz1 = None, vz2 = None, n_depletion = None, ndep = 0.0, ndep_b = 0.0,
+		ndep_t = 0.0):
+	"""Determine boundary conditions, in the following order of priority:
+	(1) By V_top - V_bottom
+	(2) Charge neutral sample
+	(3) By electric field
+	(4) Charged sample without depletion charges
+	(5) Depletion charge properties
+
+	Arguments:
+	params      PhysParams instance (positional-only). Provides the z
+	            coordinates of the layer stack and the well interfaces.
+	net_charge  Float. Sum of carrier density and background density (in nm^-2).
+	cardens     Float. Carrier density (in nm^-2)
+	efield      A list or tuple of length 2, where both elements are float or
+	            None. Electric field at bottom and top interface. None is
+	            equivalent to (None, None).
+	v_tb        Float. Potential difference between QW interfaces or along whole
+	            layer stack.
+	vz1, vz2    Floats. z coordinates (in nm) of the QW interfaces or
+	            top/bottom of layer stack. (Used in combination with v_tb.)
+	            If None, default to the bottom and top of the stack.
+	n_depletion  Float. Density in the depletion layer.
+	ndep, ndep_b, ndep_t
+	            Floats. Total, bottom barrier and top barrier depletion
+	            densities.
+
+	Returns:
+	bc          BoundaryConditions instance.
+	"""
+	zval = params.zvalues_nm()
+	z_bot, z_top = params.well_z_nm(strict = True)
+	z_mid = (z_bot + z_top) / 2
+	z_min, z_max = zval.min(), zval.max()
+	if vz1 is None:
+		vz1 = z_min
+	if vz2 is None:
+		vz2 = z_max
+	if efield is None:
+		efield = (None, None)
+
+	# Set z-coordinate of second boundary condition for electric fields
+	# depending on their individual values
+	efield_z2 = z_min if efield[0] == 0 else z_max if efield[1] == 0 else z_mid
+
+	# Evaluate boundary conditions; the first matching case wins, implementing
+	# the priority order stated in the docstring.
+	if v_tb is not None:  # and abs(v_tb) >= 1e-10:
+		bc = {'v12': v_tb, 'z1': vz1, 'z2': vz2, 'v3': 0.0, 'z3': z_mid}
+	elif abs(net_charge) < 1e-12:  # charge neutral
+		bc = {'v1': 0.0, 'z1': z_bot, 'v2': 0.0, 'z2': z_top}
+	elif efield[0] is not None:  # predefined electric field
+		bc = {'dv1': efield[0], 'z1': z_min, 'v2': 0.0, 'z2': efield_z2}
+	elif efield[1] is not None:  # predefined electric field
+		bc = {'dv1': efield[1], 'z1': z_max, 'v2': 0.0, 'z2': efield_z2}
+	elif abs(cardens) >= 1e-12 and n_depletion is None:  # carriers but without el. field; symmetric case
+		bc = {'v1': 0.0, 'z1': z_bot, 'v2': 0.0, 'z2': z_top}
+	elif abs(ndep) < 1e-12:  # antisymmetric case
+		bc = {'dv1': 0.0, 'z1': z_mid, 'v2': 0.0, 'z2': z_mid}
+	elif abs(ndep_b) < 1e-12:  # top layer only
+		bc = {'dv1': 0.0, 'z1': z_min, 'v2': 0.0, 'z2': z_min}
+	elif abs(ndep_t) < 1e-12:  # bottom layer only
+		bc = {'dv1': 0.0, 'z1': z_max, 'v2': 0.0, 'z2': z_max}
+	elif abs(ndep_t - ndep_b) < 1e-12:  # symmetric case
+		bc = {'v1': 0.0, 'z1': z_bot, 'v2': 0.0, 'z2': z_top}
+	else:
+		bc = {'dv1': 0.0, 'z1': z_mid, 'v2': 0.0, 'z2': z_mid}
+
+	return BoundaryConditions(params, bc)
+
+
+def validate_boundary_conditions(params, bc):
+	"""Check validity of boundary conditions.
+
+	The keys z1, z2, z3; v1, v2, v3; v12; dv1, dv2 determine the boundary
+	conditions. Only two boundary conditions may be set at a time, namely:
+	(v1, v2), (v1, dv2), (dv1, v2), (v1, dv1), (v2, dv2), (v12, v3)
+
+	Arguments:
+	params  PhysParams instance.
+	bc      Dict. Boundary conditions.
+
+	Returns:
+	bc_new  Dict. Fixed boundary conditions.
+	"""
+	valid_keys = ['v1', 'v2', 'v3', 'v12', 'dv1', 'dv2']
+	z1, z2, z3 = bc.get("z1"), bc.get("z2"), bc.get("z3")
+	v1, v2, v3 = bc.get("v1"), bc.get("v2"), bc.get("v3")
+	v12 = bc.get("v12")
+	dv1, dv2 = bc.get("dv1"), bc.get("dv2")
+	zval = params.zvalues_nm()
+	zres = params.zres
+
+	ncond = sum([1 for key in valid_keys if key in bc])
+	if ncond != 2:
+		sys.stderr.write("ERROR (validate_boundary_conditions): Exactly two boundary conditions must be given.\n")
+		exit(1)
+
+	if dv1 is not None and dv2 is not None:
+		sys.stderr.write("ERROR (validate_boundary_conditions): At least one of the boundary conditions must be a potential value. Two derivatives is not allowed.\n")
+		exit(1)
+
+	if (v12 is not None and v3 is None) or (v12 is None and v3 is not None):
+		sys.stderr.write("ERROR (validate_boundary_conditions): The boundary conditions v12 and v3 can only be used as a pair.\n")
+		exit(1)
+
+	for i, z in enumerate([z1, z2, z3]):
+		idx = str(i + 1)
+		if z is not None and (z < zval.min() - zres or z > zval.max() + zres):
+			sys.stderr.write(f"ERROR (validate_boundary_conditions): Boundary condition coordinate z{idx} out of range.\n")
+			exit(1)
+		if z is None and any([bc.get(key) is not None for key in valid_keys if idx in key]):
+			sys.stderr.write(f"ERROR (validate_boundary_conditions): Boundary condition coordinate z{idx} is missing.\n")
+			exit(1)
+		if z is not None:
+			i_z = np.argmin(np.abs(zval - z))
+			if np.abs(z - zval[i_z]) > 1e-6:
+				sys.stderr.write(f"Warning (validate_boundary_conditions): Boundary condition coordinate z{idx} does not align with coordinate lattice. The results may be inaccurate.\n")
+
+	bc_new = bc.copy()
+	if np.abs(z1 - z2) < 1e-10:
+		if dv1 is not None and v2 is not None:
+			bc_new['v1'] = bc_new['v2']
+			del bc_new['v2']
+		elif v1 is not None and dv2 is not None:
+			bc_new['v2'] = bc_new['v1']
+			del bc_new['v1']
+		elif v12 is not None:
+			sys.stderr.write("ERROR (validate_boundary_conditions): Coordinates z1 and z2 associated to v12 must be different.\n")
+			exit(1)
+		elif (v1 is not None and dv1 is not None) or (v2 is not None and dv2 is not None):
+			pass  # Ignore z2 if v1 and dv1 are used, ignore z1 if v2 and dv2 are used.
+		else:
+			sys.stderr.write("ERROR (validate_boundary_conditions): Boundary conditions must be defined at unequal positions.\n")
+			exit(1)
+
+	return bc_new
+
+class BoundaryConditions(dict):
+	"""A dict of boundary conditions with a few extra properties and functions
+
+	The dict keys are among z1, z2, z3 (coordinates) and v1, v2, v3, v12, dv1,
+	dv2 (potential values and derivatives); see validate_boundary_conditions().
+
+	Attributes:
+	zres         Float. Resolution of the z coordinate grid.
+	zmin, zmax   Floats. Minimum and maximum z coordinate.
+	zlabelled    Dict. Contains labelled z coordinates. The keys are strings,
+	             the values are floats.
+	"""
+	def __init__(self, params, bc):
+		# Prepare z coordinates and labelled positions
+		zval = params.zvalues_nm()
+		self.zres = params.zres
+		self.zmin, self.zmax = zval.min(), zval.max()
+		self.zlabelled = {'bottom': zval.min(), 'top': zval.max()}
+		zif1, zif2 = params.well_z_nm()
+		# Well-interface labels are only available if the well is defined
+		if zif1 is not None and zif2 is not None:
+			self.zlabelled.update({'bottom_if': zif1, 'top_if': zif2, 'mid': (zif1 + zif2) / 2})
+		# Validate input argument bc and set dict
+		validated_bc = validate_boundary_conditions(params, bc)
+		super().__init__(**validated_bc)
+
+	def apply_custom(self, custom_bc):
+		"""Apply boundary condition choices by user.
+
+		Arguments:
+		custom_bc   Dict. Boundary conditions given by user. The z values may
+		            be coordinates (numbers) or labels (keys of zlabelled).
+
+		Returns:
+		self        BoundaryConditions instance. Overwritten or updated boundary
+		            conditions.
+		"""
+		user_bc = custom_bc.copy()  # copy dict to not change original one
+
+		# Resolve z labels to coordinates and remove keys that are absent in
+		# the current boundary conditions when these will not be replaced
+		# completely by custom_bc (fewer than 4 user keys = partial update).
+		for key, val in custom_bc.items():
+			# Check appearance
+			if len(custom_bc) < 4 and key not in self:
+				sys.stderr.write(f"WARNING (BoundaryConditions.apply_custom): The key '{key}' could not be found in the automatically determined boundary conditions and will be ignored.\n")
+				del user_bc[key]
+				continue
+
+			# Only z keys need resolution; value keys are taken as-is
+			if "z" not in key:
+				continue
+
+			if isinstance(val, str):  # label as input
+				if val not in self.zlabelled:
+					zlabelled_str = ", ".join(self.zlabelled.keys())
+					sys.stderr.write(f"ERROR (BoundaryConditions.apply_custom): The label '{val}' is not a valid z label. Choose from {zlabelled_str}.\n")
+					exit(1)
+				user_bc[key] = self.zlabelled[val]
+			else:  # coordinate as input
+				if val > self.zmax + self.zres or val < self.zmin - self.zres:
+					# out of range
+					sys.stderr.write(f"ERROR (BoundaryConditions.apply_custom): The z-value {val} nm is out of range for the given stack configuration. The limits are {self.zmin}, {self.zmax}.\n")
+					exit(1)
+				user_bc[key] = val
+
+		if len(user_bc) >= 4:  # overwrite completely by user input
+			self.clear()
+		self.update(user_bc)
+		return self
+
+	def test_potential(
+			self, zval, vz, tolerance = 1e-6, accept_shift = True,
+			verbose = False):
+		"""Check if a potential function satisfies a set of boundary conditions.
+		Compare the values of V(z) and dV/dz(z) from the array vz with the values
+		of v_i and/or dv_i at the points z_i given in the boundary conditions.
+
+		Arguments:
+		zval          Numpy array. The z coordinates for potential vz.
+		vz            Numpy array. The potential as function of z.
+		tolerance     Float. The tolerance for comparison of values.
+		accept_shift  True or False. Whether to accept a uniform shift in the
+		              values V(z).
+		verbose       True or False. If True, print the non-matching values.
+
+		Returns:
+		result      True or False. The result is False if any boundary condition
+		            is not satisfied, i.e., where the difference exceeds the
+		            tolerance.
+
+		Raises:
+		KeyError    If z1 or z2 is missing, or if v3 is present without z3.
+		"""
+		z1, z2, z3 = tuple(self.get(z) for z in ['z1', 'z2', 'z3'])
+		if z1 is None or z2 is None:
+			raise KeyError("BoundaryConditions instance must contain 'z1' and 'z2'.")
+		dz = self.zres
+		arr_val = {}  # stores all relevant values from vz
+
+		if 'v12' in self and 'v3' in self:
+			# Special case: (v12, v3)
+			if z3 is None:
+				raise KeyError("If BoundaryConditions instance contains 'v3', it must also contain 'z3'.")
+			v1, v2, v3 = np.interp([z1, z2, z3], zval, vz)
+			arr_val['v12'] = (v2 - v1)
+			arr_val['v3'] = v3
+		else:
+			# All other cases: (v1, v2), (v1, dv2), (dv1, v2), (v1, dv1), (v2, dv2)
+			# NOTE(review): np.gradient assumes unit index spacing; dividing by
+			# dz presumes zval is uniformly spaced with step zres — confirm.
+			dv1, dv2 = np.interp([z1, z2], zval, np.gradient(vz) / dz)
+			v1, v2 = np.interp([z1, z2], zval, vz)
+			arr_val['dv1'] = dv1
+			arr_val['dv2'] = dv2
+			arr_val['v1'] = v1
+			arr_val['v2'] = v2
+
+		delta_val = {}  # Gather all differences larger than tolerance
+		for key in ['v1', 'v2', 'v3', 'dv1', 'dv2', 'v12']:
+			if key in self and key in arr_val and abs(arr_val[key] - self[key]) > tolerance:
+				delta_val[key] = arr_val[key] - self[key]
+		if len(delta_val) == 0:
+			return True
+		if verbose:
+			for key in delta_val:
+				print(f"BC test: {key}(array) = {arr_val[key]}, {key}(bc) = {self[key]}")
+		# Check for uniform shift: all mismatches are potential values (not
+		# derivatives) and deviate by the same amount
+		if all(key in ['v1', 'v2', 'v3'] for key in delta_val):
+			values = list(delta_val.values())
+			if all(abs(val - values[0]) <= tolerance for val in values):
+				if verbose:
+					print(f"BC test: Uniform shift {values[0]}.")
+				return accept_shift
+		return False
+
+	def validate(self, params, in_place = True):
+		"""Validate this set of boundary conditions
+
+		Arguments:
+		params    PhysParams instance.
+		in_place  True or False. If True, update this instance and return it;
+		          if False, return a new BoundaryConditions instance.
+		"""
+		validated_bc = validate_boundary_conditions(params, self)
+		if in_place:
+			self.clear()
+			self.update(validated_bc)
+			return self
+		else:
+			return BoundaryConditions(params, validated_bc)
diff --git a/kdotpy-v1.0.0/src/kdotpy/resumetools.py b/kdotpy-v1.0.0/src/kdotpy/resumetools.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cb73f3f595056bfa9ec943ba43ecf2eb581cfdf
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/resumetools.py
@@ -0,0 +1,95 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+from time import time as rtime
+from datetime import datetime
+from pickle import dump, load
+
+from .cmdargs import outputid
+from .types import DiagDataPoint
+
+_start_timestamp = datetime.now()
+
+class ddp_tempfile:
+    """Context manager for pickled DiagDataPoint temporary files."""
+    def __init__(self, ddp, mode='wb', path=None):
+        if path is None:
+            path = "./temp%s_%s/" % (outputid(), _start_timestamp.isoformat(timespec='seconds').replace(':', '-'))
+        if path[-1] not in ['/', '\\']:
+            path += '/'
+        if not os.path.exists(path) and 'w' in mode:
+            os.mkdir(path)
+            sys.stderr.write("Created 'tempout' folder: %s\n" % path)
+        self.filepath = path + str(ddp).replace(' ', '') + '_' + ddp.hash_id() + ".tmp"
+        self.mode = mode
+        self.file_obj = None
+
+    def __enter__(self):
+        try:
+            self.file_obj = open(self.filepath, mode=self.mode)
+        except:  # suppress any file open errors
+            self.file_obj = None
+        return self.file_obj
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.file_obj is not None:
+            self.file_obj.close()
+        return True  # suppress errors during with statement
+
+def save_ddp_tempfile(ddp):
+    """Save a DiagDataPoint instance as pickled file to temporary subdirectory."""
+    t0 = rtime()
+    with ddp_tempfile(ddp, mode='wb') as file:
+        dump(ddp, file)
+        if 'verbose' in sys.argv:
+            sys.stderr.write("%s exported successfully (%.3gs).\n" % (ddp, rtime()-t0))
+
+def load_ddp_tempfile(ddp, path):
+    """Try to load a pickled DiagDataPoint instance from a given directory."""
+    t0 = rtime()
+    with ddp_tempfile(ddp, mode='rb', path=path) as file:
+        ddp = load(file)
+        if isinstance(ddp, DiagDataPoint):
+            if 'verbose' in sys.argv:
+                sys.stderr.write("%s imported successfully (%.3gs).\n" % (ddp, rtime()-t0))
+            return ddp
+    return None
diff --git a/kdotpy-v1.0.0/src/kdotpy/selfcon.py b/kdotpy-v1.0.0/src/kdotpy/selfcon.py
new file mode 100644
index 0000000000000000000000000000000000000000..955e5eb9456692ef6baa2537ae55103f73c9041a
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/selfcon.py
@@ -0,0 +1,1676 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+import multiprocessing as mp
+
+from .config import get_config, get_config_bool, get_config_num, get_config_int
+from .erange import erange_from_target_eres
+from .potential import init_potential, solve_potential, cardens_from_potential
+from .bandalign import bandindices, bandindices_adiabatic, bandindices_adiabatic_ll
+from .parallel import parallel_apply
+from .diagonalization import DiagData, DiagDataPoint
+from .diagonalization import diagonalization as diag
+from .diagonalization import diagsolver as dsolv
+from .diagonalization import lldiagonalization as lldiag
+from .density import integrated_dos, densityz, densityz_surface_states, integrated_dos_ll, densityz_ll
+from .density import opts_to_broadening
+from .hamiltonian import hz_sparse_split
+from .symbolic import SymbolicHamiltonian
+from .momentum import Vector, VectorGrid
+from .models import ModelLL, ModelMomentum2D
+from .physconst import eoverhbar
+
+## Exception
class SelfConError(Exception):
	"""Raised to abort the self-consistent calculation; caught in SelfConSolver.run()."""
	pass
+
+
+## Differential solvers
def forward_euler(vz, diffs, h):
	"""Forward Euler step: return vz[-1] + h * diffs[-1].

	Arguments:
	vz      Sequence of previous values; only the last entry is used.
	diffs   Sequence of differences/derivatives; only the last entry is used.
	h       Float, int, or numpy array. Step size. An array is applied
	        element-wise along the first axis of diffs[-1].

	Returns:
	The new value (same shape as vz[-1] broadcast with diffs[-1]).

	Raises:
	TypeError if h is not a float, int, or numpy array. (Previously, an
	unsupported type silently returned None.)
	"""
	if isinstance(h, (float, int)):
		return vz[-1] + h * diffs[-1]
	elif isinstance(h, np.ndarray):
		return vz[-1] + h[:, np.newaxis] * diffs[-1]
	else:
		raise TypeError("Argument h must be a float, int, or numpy array.")
+
+
+class SelfConSolver:
+	"""Container for self-consistent potential solver, with input, output, and methods
+
+	Attributes (input as __init__ arguments):
+	kbs                ZippedKB instance. The grid of k and B values.
+	params             PhysParams instance.
+	target_accuracy    Float. Target accuracy to be reached for a successful
+	                   calculation (status 0)
+	time_step          Float (> 0). Initial time step for abstract time. This is
+	                   equivalent to the weight of the current iteration result
+	                   entering into the new potential value. Values > 1 are
+	                   allowed, but typically not useful.
+	min_iterations     Integer. Minimum number of iterations that are calculated
+	                   in any case.
+	max_iterations     Integer. Maximum number of iterations before calculation
+	                   is aborted.
+	keep_eidata        True or False. If True, keep the eigenvalues and -vectors
+	                   from the last diagonalization in memory as self.eidata.
+	                   Default is False.
+	num_cpus           Integer. Number of workers to use during parallelized
	                   diagonalization processes.
+	erange             Array or tuple. The energy range.
+	cardens            Float or None. The desired carrier density, measured as
+	                   particle density (positive for electrons, negative for
+	                   holes).
	outputid           String or None. User-set suffix for (temporary) output
	                   files. If None, do not insert anything.
+	opts               A dict instance. General options.
+	modelopts          A dict instance. Model options.
+	bandalign_opts     A dict instance. Options for band alignment.
+
+	Attributes (from configuration values):
+	acceptable_status  Integer, 0-4. Maximum acceptable status level:
+	                   0: Successful
+	                   1: Calculation skipped or aborted
+	                   2: Did not converge, but convergence is likely after more
+	                   iterations
+	                   3: Did not converge, convergence can not be estimated
+	                   4: Failed critically
+	broadening         BroadeningFunction or MultiBroadening instance.
+	cons_conv          Integer > 0. Consecutive converged iterations needed
+	                   for result to be considered "truly" converged.
+	check_chaos_steps  Integer > 0. The number of iterations used for checking
+	                   chaotic behaviour.
+	check_orbit_steps  Integer > 0. The number of iterations used for checking
+	                   periodic orbits.
+	diff_norm_method   String. Choose which norm is used to get a measure for
+	                   how far away the last potential difference is from
+	                   convergence. Currently implemented are 'max' and 'rms'.
+	debug              True or False. Enable or disable debug mode. In debug
+	                   mode, write temporary files and re-raise SelfConError and
+	                   KeyboardInterrupt within the iteration loop. This is
+	                   useful to get traceback (for debugging). Other exceptions
+	                   are always re-raised.
+	dynamic_time_step  True or False. Whether the time_step for the self-
+	                   consistent calculation is adapted automatically (True)
+	                   between iterations or a fixed value (False; default).
+	min_time_step      Float between 0 and 1. The lower limit for time_step in
+	                   dynamic time step adjustment.
+	potential_to_zero  True or False. Whether to subtract the average value from
+	                   the potential at each iteration.
+	out                Stream. Where to write the status messages. By default
+	                   equal to sys.stderr.
+
+	Attributes (set at initialization and iteration):
+	n_it               Integer. Current iteration number (read-only).
+	convergence_count  Integer. Consecutive converged iterations so far
+	                   (read-only).
+	status             Integer. Current status; see attribute acceptable_status
+	                   for a list. This also can have the additional value -1,
+	                   meaning the calculation has not been completed.
+	times              Array of floats. Cumulated (abstract) time at every
+	                   iteration.
+	diffsolver         Callable. The differential equation solver.
+	epsilonz           1-dim array. Values of the dielectric constant as
+	                   function of z
+	bc                 A dict instance. The boundary conditions.
+	vz                 List of 1-dim arrays. Potentials after each iteration
+	                   step. The array axis represents the z dependence.
+	diffs              1-dim array. Potential difference as given by using
+	                   diagonalize(), update_density() and solve_potential().
+	                   Depending on the differential solver and time_step,
+	                   diffs[i] may differ from vz[i] - vz[i-1].
+	pdensz             List of 1-dim arrays. Particle densities after each
+	                   iteration step. The array axis represents the z
+	                   dependence. Electron-like states count as positive,
+	                   hole-like states as negative.
+	densz_bg           1-dim array or None. Background density as function of z.
+	                   This density is added to the density coming from the band
+	                   structure prior to solving the potential. In other words,
+	                   this is the density without any filled state.
+	eidata             DiagData instance. The most recent diagonalization
+	                   result.
+	tempfiles          List of strings. File names for temporary files that have
+	                   already been used.
+	min_time_step      Float. Defines the minimum time step reachable, depending
+	                   on cardens and valence band contribution at the electro-
+	                   chemical potential. Set after first iteration. Only
+	                   relevant if dynamic_time_step is set to True. (read only)
+
+	Attributes (set at convergence):
+	densz_e            1-dim array or None. Electron density as function of z.
+	                   Set only after convergence.
+	densz_h            1-dim array or None. Hole density as function of z. Set
+	                   only after convergence.
+	special_energies   A dict instance. For ef0, ef, etc.; see density.py.
+
+	Methods:
+	next_iter          Method to check abortion criteria and output status
+	                   information
+	check_acceptable_status  Check whether the current status is acceptable.
+	"""
+	def __init__(
+			self, kbs, params, target_accuracy = 0.01, time_step = 0.9,
+			max_iterations = 10, min_iterations = 0, num_cpus = 1,
+			keep_eidata = False, erange = None, cardens = None, weight = None,
+			outputid = None, opts = None, modelopts = None, bandalign_opts = None):
+		"""Initialize self-consistent potential solver.
+
+		Arguments:
+		See Attribute information for class.
+		"""
+		# TODO: Type tests
+		self.params = params
+		self.num_cpus = num_cpus
+		self.cardens = cardens
+		self.outputid = outputid  # If None, do not use outputid
+
+		# Store well center separately for use in solve_potential()
+		try:
+			zif1, zif2 = params.well_z(strict = True)
+		except:
+			sys.stderr.write("ERROR (SelfConSolver): The well layer could not be identified. This is necessary for the SC Hartree calculation to proceed.\n")
+			raise
+		self.well_center = (zif1 + zif2) / 2  # point centered in the well
+
+		if min_iterations < 0:
+			raise ValueError("Argument min_iterations must be >= 0.")
+		self.min_iterations = min_iterations
+		if max_iterations < min_iterations:
+			max_iterations = min_iterations
+			sys.stderr.write(f"Warning (SelfConSolver): Maximum number of iterations must be at least the minimum number ({min_iterations}).\n")
+		self.max_iterations = max_iterations
+
+		if target_accuracy <= 0.0:
+			raise ValueError("Argument target_accuracy must be a positive number.\n")
+		elif target_accuracy < 5e-3:
+			sys.stderr.write("Warning (SelfConSolver): A small accuracy target may not be reachable within a reasonable number of iterations.\n")
+		self.target_accuracy = target_accuracy
+
+		if weight is not None:  # weight is kept as an alias for time_step
+			sys.stderr.write("Warning (SelfConSolver): Argument weight is a deprecated alias for time_step. Please use time_step instead.\n")
+			time_step = weight
+		if time_step <= 0.0:
+			raise ValueError("Argument time_step (weight) must be positive.\n")
+		elif time_step < 0.5:
+			sys.stderr.write("Warning (SelfConSolver): Small time_step (weight) may cause convergence to be slow.\n")
+		elif time_step > 1.0:
+			sys.stderr.write("Warning (SelfConSolver): Large time_step (weight) may lead to erratic results.\n")
+		self.time_step = time_step
+		self.dynamic_time_step = get_config_bool('selfcon_dynamic_time_step')
+		self.min_time_step = get_config_num('selfcon_min_time_step', 0.0, 1.0)
+		if self.time_step < self.min_time_step:
+			sys.stderr.write("Warning (SelfConSolver): Initial time step is set to a lower value than the minimal time step (configuration value 'selfcon_min_time_step').\n")
+		self.check_chaos_steps = get_config_int('selfcon_check_chaos_steps', minval=1)
+		self.check_orbit_steps = get_config_int('selfcon_check_orbit_steps', minval=1)
+
+		# Set options
+		self.keep_eidata = keep_eidata
+		self.opts = opts if isinstance(opts, dict) else {}
+		# Copy modelopts here so we can later modify them
+		self.modelopts = modelopts.copy() if isinstance(modelopts, dict) else {}
+		self.bandalign_opts = bandalign_opts if isinstance(bandalign_opts, dict) else {}
+
+		# Initialize status and set acceptable status; set debug mode
+		self.acceptable_status = get_config_int('selfcon_acceptable_status', minval = 0, maxval = 4)
+		self.status = -1
+		self.debug = get_config_bool('selfcon_debug')
+		self.out = sys.stderr
+		self.potential_to_zero = get_config_bool('selfcon_potential_average_zero')
+
+		# Initialize iteration properties
+		self.n_it = 0
+		self.cons_conv = get_config_int('selfcon_convergent_steps', minval = 1)
+		self.times = [0.0]
+		self.diffsolver = forward_euler
+		self.diff_norm_method = get_config('selfcon_diff_norm', choices = ['max', 'rms'])
+
+		# Set grid (k and B values)
+		if len(kbs) <= 1:
+			sys.stderr.write("Warning (SelfConSolver): Nothing to be done.\n")
+			self.status = 1
+		self.kbs = kbs
+
+		# Set array of energies
+		self.min_eres = get_config_int('selfcon_energy_points', minval = 0)
+		self.erange = erange_from_target_eres(erange, self.min_eres)
+
+		# Set broadening (analogous to postprocess.py)
+		temperature = opts.get('tempbroadening')
+		temperature = params.temperature if temperature is None else temperature
+		self.broadening = opts_to_broadening(opts, default = {'thermal': temperature})
+
+		# Set array of dielectric constants
+		# (This method is quite inefficient but will do the job)
+		self.epsilonz = np.array([params.z(z)['diel_epsilon'] for z in range(0, params.nz)])
+
+		# Set other attributes to trivial values
+		self.bc = {}
+		self.vz = []
+		self.diffs = []
+		self.pdensz = []
+		self.pdensz_bg = None
+		self.pdensz_offset = None
+		self.pdensz_e = None
+		self.pdensz_h = None
+		self.eidata = None
+		self.special_energies = {}
+		self.tempfiles = []
+		self.convergence_count = 0
+		self.n_offset = None
+
	def init_potential(
			self, potential = None, cardens = None, n_bg = None, n_offset = None,
			n_surf = None, d_surf = 8.0, **kwds):
		"""Initialize the potential.

		Arguments:
		potential   1-dim array or None. If an array, take the potential from
		            this array, for example pre-loaded from a file. Otherwise,
		            initialize potential using **kwds if the configuration value
		            selfcon_use_init_density is True and set it to zero if it is
		            False.
		cardens     Float or None. The desired carrier density. If None, use the
		            carrier density set by SelfConSolver.__init__().
		n_bg        Float or None. The background density (uniformly distributed
		            in z) that contributes to the total carrier density
		            distribution used in solve_potential() but not to cardens.
		n_offset    Float or None. Offset carrier density which contributes to
		            cardens. The offset carrier distribution is calculated from
		            the solved Hamiltonian in each iteration and is subtracted
		            from the total carrier density distribution used in
		            solve_potential().
		n_surf      Number, 2-tuple, or None. If numeric, apply this surface
		            density (in nm^-2) to both bottom and top surface in the
		            well layer. If a 2-tuple, apply two different densities to
		            bottom and top layer, respectively. If one of the two values
		            is None, that respective surface is not considered, i.e.,
		            the bulk extends completely to the interface of the well
		            layer. The value (None, None) is not permitted.
		d_surf      Number. Thickness of the surface layer(s) in nm.
		**kwds      The keyword arguments n_depletion, l_depletion, v_inner,
		            v_outer, efield. These are passed to the function
		            init_potential (potential.py). Note that even if potential
		            is an array, the keyword arguments are used to determine
		            carrier density, boundary conditions, etc.
		"""
		verbose = 'verbose' in sys.argv
		nz = self.params.nz
		dz = self.params.zres
		zval = self.params.zvalues_nm()
		cardens = self.cardens if cardens is None else cardens
		# This calls the module-level function init_potential() imported from
		# potential.py, not this method (which shadows the name locally).
		cardens, qdensz, qdensz_bg, bc = init_potential(self.params, cardens = cardens, verbose = verbose, **kwds)
		self.pdensz_offset = np.zeros_like(qdensz)
		if cardens is None:
			# Even after init_potential() cardens is still None.
			# Handle it like a carrier density of 0 e/nm^2
			cardens = 0
		if n_bg is not None:
			# background density
			zif1, zif2 = self.params.well_z(strict=True)  # should always work, since this is already checked in init_potential()
			qdensz_bg = np.zeros(nz, dtype=float)
			qdensz_bg[zif1: zif2 + 1] -= n_bg / dz / ((zif2 + 1) - zif1)
		if n_offset is not None:
			# density offset
			# calculates pdensz_offset in each iteration in self.update_potential()
			# which will be subtracted from pdensz for solve_potential()
			cardens += n_offset
			self.n_offset = n_offset
		# Discard the charge density returned by init_potential(); the initial
		# solve below uses only the background density qdensz_bg, and
		# self.pdensz starts from zero.
		qdensz = np.zeros_like(qdensz)
		if potential is None:
			if get_config_bool('selfcon_use_init_density'):
				vz = solve_potential(
					zval, qdensz + qdensz_bg, self.epsilonz, dz = dz,
					verbose = verbose, well_center = self.well_center, **bc
				)
			else:
				vz = np.zeros_like(qdensz)
		elif isinstance(potential, np.ndarray):
			if potential.ndim != 1:
				raise ValueError("Only single potential (1d array) supported")
			if potential.shape[0] != nz:
				raise ValueError(f"Argument potential has length {potential.shape[0]}, expected {nz}")
			vz = potential
			# NOTE(review): assumes the bc object provides test_potential();
			# confirm against init_potential() in potential.py.
			if not bc.test_potential(zval, vz, verbose = True):
				sys.stderr.write("Warning (SelfConSolver.init_potential): Input potential is incompatible with the boundary conditions.\n")
			cardens_from_vz = cardens_from_potential(vz, self.epsilonz, dz = dz)
			if cardens is not None and abs(cardens_from_vz - cardens) > 1e-6:
				sys.stderr.write(f"Warning (SelfConSolver.init_potential): Carrier density from input potential ({cardens_from_vz:4g}) differs from carrier density from direct input or from boundary conditions ({cardens:4g}).\n")
		else:
			raise NotImplementedError
		self.cardens = cardens
		self.pdensz.append(-qdensz)
		if n_surf is not None:  # Experimental: surface state background density
			self.pdensz_bg = densityz_surface_states(self.params, n_surf, d_surf)
		else:
			self.pdensz_bg = -qdensz_bg
		self.bc = bc
		self.vz.append(vz)
		# Debug output (written only in debug mode, see write_to_tempfile)
		self.write_to_tempfile('scpot.csv', zval, vz)
		self.write_to_tempfile('scdens.csv', zval, qdensz)
		if verbose:
			print("Pot:")
			print(vz)
			print("Potential antisymmetric part:")
			print(vz - vz[::-1])
			print("Potential antisymmetry")
			print(np.amax(np.abs(vz - vz[::-1])))
+
+	def message(self, msg, verbose = False):
+		if self.out is not None:
+			self.out.write("Info (SelfConSolver): " + msg)
+
+	def write_to_tempfile(self, filename, z, vz):
+		"""Write potential to temporary file (for debugging)
+
+		Arguments:
+		filename    String. Filename in the current working directory
+		z           List of axis values (e.g. z/growth direction)
+		vz          List of values along the axis
+
+		No return value
+		"""
+		if not self.debug:
+			return
+		if self.outputid is not None:
+			# Split filename at file extension, insert outputid, and recombine
+			fname, fext = filename.rsplit(".", 1)
+			filename = f"{fname}{self.outputid}.{fext}"
+		new = filename not in self.tempfiles
+		if new:
+			self.tempfiles.append(filename)
+		try:
+			f = open(filename, 'w' if new else 'a')
+		except:
+			return
+		if new and z is not None:
+			f.write(', '.join([f"{z1}" for z1 in z]) + '\n')
+		f.write(', '.join([f"{v1}" for v1 in vz]) + '\n')
+		f.close()
+
+	def write_bandindex_tempfile(self, b_max = 12, filename = "scbandalign.csv"):
+		"""Write energies and band characters to output file"""
+		b_idx = [b for b in range(-b_max, b_max + 1) if b != 0]
+		def eival_fmt(x):
+			return "" if x is None else f"{x:.3f}"
+		eidata0 = self.eidata.get_base_point()
+		b_eival = [eival_fmt(eidata0.get_eival((b,))) for b in b_idx]
+		b_char = [eidata0.get_char((b,)) for b in b_idx]
+		self.write_to_tempfile(filename, b_idx, b_eival)
+		self.write_to_tempfile(filename, b_idx, b_char)
+
	def diagonalize(self):
		"""Diagonalize the Hamiltonian with the current potential over the k/B grid.

		Stores the result in self.eidata (a DiagData instance) with band
		characters and band indices set.

		Raises:
		ValueError if init_potential() has not been called yet.
		"""
		if len(self.vz) < 1:
			raise ValueError("SelfConSolver potential has not been initialized")

		# Use a private copy of the model options with the current potential;
		# observables are not needed during the self-consistent loop.
		modelopts = self.modelopts.copy()
		modelopts['pot'] = self.vz[-1]
		if 'obs' in modelopts:
			del modelopts['obs']
		if 'dimful_obs' in modelopts:
			del modelopts['dimful_obs']

		# Determine band indices by slowly increasing the potential. The
		# following function applies band alignment to the Hamiltonian
		# H = H0 + alpha V, where H0 is the Hamiltonian without potential, V is
		# the potential, and alpha is increased in small steps from 0 to 1. The
		# result is the DiagDataPoint for alpha = 1, which has its band indices
		# set.
		eidata_k0 = bandindices_adiabatic(
			self.params, pot = self.vz[-1], num_cpus = self.num_cpus,
			modelopts = modelopts, bandalign_opts = self.bandalign_opts)

		# Diagonalization over grid
		modelopts['return_eivec'] = True
		self.eidata = DiagData(parallel_apply(
			diag.hz, self.kbs, (self.params,), f_kwds = modelopts,
			num_processes = self.num_cpus, propagate_interrupt = True,
			description = f"SC Hartree iteration {self.n_it}"),
			grid = self.kbs.get_grid())
		self.eidata.set_char(eidata_k0)  # Store band characters
		bandindices(self.eidata, input_data = eidata_k0, params = self.params)

		# Write temp file with band energies and characters as function of index
		self.write_bandindex_tempfile()
+
	def update_density(self, finalization = False):
		"""Update density from diagonalization data.

		Arguments:
		finalization   True or False. If True, compute the separate electron
		               and hole densities (pdensz_e, pdensz_h) and return
		               without appending to the iteration history.

		Raises:
		ValueError    If diagonalize() has not been called yet.
		SelfConError  If the chemical potential cannot be determined; status
		              is set to 4 in that case.
		"""
		if self.eidata is None:
			raise ValueError("No diagonalization data. Use diagonalize() before update_density().")
		# In the following, the argument 'cardens' invokes a high-precision
		# calculation of the chemical potential; it is "hidden" in the output 'ef'.
		densitydata = integrated_dos(self.eidata, self.erange, self.params, broadening = self.broadening)
		chem_pot = densitydata.energy_at_idos(self.cardens, save_as = 'ef')
		self.special_energies = densitydata.special_energies

		if chem_pot is None:
			self.status = 4
			raise SelfConError("Unable to determine Fermi energy/chemical potential.\n")

		# Calculate offset density
		if self.n_offset is not None:
			chem_pot_offset = densitydata.energy_at_idos(self.n_offset)
			self.pdensz_offset = densityz(
				self.eidata, chem_pot_offset, self.erange,
			  	self.params.nz, dz = self.params.zres, norb=self.params.norbitals,
			  	broadening=self.broadening, electrons=True, holes=True
			)

		# For finalization, calculate electron and hole density, then exit
		if finalization:
			self.pdensz_e = densityz(
				self.eidata, chem_pot, self.erange, self.params.nz,
				dz = self.params.zres, norb = self.params.norbitals,
				broadening = self.broadening, electrons=True, holes=False
			)
			self.pdensz_h = densityz(
				self.eidata, chem_pot, self.erange, self.params.nz,
				dz = self.params.zres, norb = self.params.norbitals,
				broadening = self.broadening, electrons=False, holes=True
			)
			return

		# In older versions, the result of densityz() was explicitly multiplied
		# by 1 / (2 pi)^2. This factor has been dropped here, as it is now
		# applied internally in densityz_energy(), a function called by
		# densityz().
		pdensz = densityz(
			self.eidata, chem_pot, self.erange, self.params.nz,
			dz = self.params.zres, norb = self.params.norbitals,
			broadening = self.broadening, electrons=True, holes=True
		)
		self.pdensz.append(pdensz)

		# Debug output
		zval = self.params.zvalues_nm()
		self.write_to_tempfile("scdens.csv", zval, -pdensz)  # qdensz = -pdensz
		if 'verbose' in sys.argv:
			print("Density: rho =", np.sum(pdensz) * self.params.zres)
			print(pdensz[:8])
			print("...")
			print(pdensz[-8:])
			print("Density antisymmetry:", np.amax(np.abs(pdensz - pdensz[::-1])))
+
	def update_potential(self):
		"""Update potential (difference) from density.

		Solves the Poisson equation for the latest density and appends the
		difference between the solved potential and the current one to
		self.diffs. The potential itself is updated later by apply_diff().

		Raises:
		ValueError if update_density() has not been called since the last
		update.
		"""
		if len(self.diffs) + 1 >= len(self.pdensz):  # array diffs is always one shorter
			raise ValueError("Diff has already been updated. First use update_density() (again).")
		zval = self.params.zvalues_nm()
		# Charge density = -(particle density + background particle density)
		# NOTE(review): self.pdensz_offset (see init_potential(), n_offset) is
		# not subtracted here although the comment there says it should enter
		# solve_potential() -- confirm whether this is intentional.
		qdensz = -(self.pdensz[-1] + self.pdensz_bg)
		verbose = 'verbose' in sys.argv
		vz = solve_potential(
			zval, qdensz, self.epsilonz, dz = self.params.zres,
			verbose = verbose, well_center = self.well_center, **self.bc
		)

		if verbose:
			print("Potential antisymmetry:", np.amax(np.abs(vz - vz[::-1])))
		if self.potential_to_zero:  # put potential average at zero
			vz_avg = np.sum(vz) / len(vz)
			vz -= vz_avg

		# Calculate potential step and apply diffsolver to get the new potential
		self.diffs.append(vz - self.vz[-1])

		# Debug output
		self.write_to_tempfile('scpot.csv', zval, vz)
+
+	def apply_diff(self):
+		"""Update potential from potential difference"""
+		if len(self.vz) >= len(self.diffs) + 1:  # array diffs is always one shorter
+			raise ValueError("Potential has already been updated. First use update_diff() (again).")
+		vz_new = self.diffsolver(self.vz, self.diffs, self.time_step)
+		self.vz.append(vz_new)
+
+	def do_iteration(self):
+		"""Do iteration step.
+
+		Returns:
+		success  True (succesful) or False (failed)
+		"""
+		self.n_it += 1
+		time_new = self.times[-1] + self.time_step
+		self.times.append(time_new)
+		self.message(f"Iteration #{self.n_it} (t = {time_new}):\n")  # :.4g
+		self.diagonalize()
+		self.update_density()
+		self.update_potential()
+		self.apply_diff()
+		return self.diffs[-1]
+
+	def check_status(self):
+		"""Check if status is acceptable."""
+		return self.status <= self.acceptable_status
+
+	def get_diff_norm(self, arr = None):
+		"""Calculate a measure for convergence from last diff, depending on diff_norm_method.
+
+		Arguments
+		arr   Numpy array or None. If None, use the last entry in self.diffs. If
+		      arr is set, use that array instead.
+		"""
+		if arr is None:
+			arr = self.diffs[-1]
+		if self.diff_norm_method == 'max':  # maximum (a.k.a. sup or L-infinity norm)
+			return np.amax(np.abs(arr))
+		elif self.diff_norm_method == 'rms':  # root-mean-square (L2 norm)
+			return np.sqrt(np.mean(arr**2))
+		else:  # not implemented
+			raise NotImplementedError(f"Diff norm {self.diff_norm_method} not implemented")
+
+	def get_distances(self, arr = None):
+		"""Get distances of the last value of the array (vz) to the previous ones
+
+		Argument:
+		arr   Numpy array or None. If None, use self.vz. If arr is set, use that
+		      array instead.
+		"""
+		arr = np.asarray(self.vz) if arr is None else np.asarray(arr)
+		if arr.ndim != 2:
+			raise ValueError("Array must be 2 dimensional")
+		return np.array([self.get_diff_norm(arr[-1] - x) for x in arr])
+
+	def adjust_time_step(self, factor=None, offset=None):
+		"""Adjust time step to absolute or relative value.
+
+		The result is time_step_old * factor + offset. This value is clipped
+		between self.min_time_step and 1.0.
+
+		Arguments:
+		factor   Float > 0.
+		offset   Float between 0 and 1.
+
+		No return value
+		"""
+		if factor is None:
+			factor = 1.0
+		elif factor <= 0.0:
+			raise ValueError("Argument factor must be > 0.")
+		if offset is None:
+			offset = 0.0
+		elif offset < 0.0 or offset > 1.0:
+			raise ValueError("Argument offset must be between 0 and 1")
+		self.time_step = self.time_step * factor + offset
+		self.time_step = max(min(self.time_step, 1.0), self.min_time_step)
+		self.message(f"Adjusted time step = {self.time_step:.4g}\n")
+		return
+
+	def check_convergence(self):
+		"""Check if calculation has converged"""
+		if len(self.diffs) == 0:
+			return False  # not an exception
+		diff_val = self.get_diff_norm()
+		self.message(f"Accuracy reached so far: {diff_val:.2g} meV.\n")
+		return diff_val < self.target_accuracy
+
	def check_history(self):
		"""Analyze which potentials in previous iterations lie closest (experimental)

		Detects chaotic behaviour and periodic orbits in the sequence of
		potentials; if dynamic_time_step is enabled, the time step is reduced
		accordingly. No return value.
		"""
		if self.n_it < 1:
			return
		# Shortcuts
		n_chaos_check = self.check_chaos_steps
		n_orbit_check = self.check_orbit_steps
		# Obtain distances between current and previous potentials
		history = self.get_distances()[:-1]
		# Find closest previous iteration
		iter_min = np.argmin(history)
		iter_ago = len(history) - iter_min
		# Rank iteration history by distance
		sorted_iter_ago = len(history) - np.argsort(history)
		if 'verbose' in sys.argv:
			self.message(f"Full history of distances d(V_current - V_i): {history}\n")
			self.message(f"Minimum at {iter_min} ({iter_ago} iteration steps ago)\n")
			self.message(f"Iterations ago, sorted by distance: {sorted_iter_ago}\n")
		# Detect chaotic behaviour; this is the case if the first n values are
		# all (strictly) larger than n (n = n_chaos_check)
		if len(history) >= n_chaos_check and min(sorted_iter_ago[:n_chaos_check]) > n_chaos_check:
			self.message(f"Chaos detected: {sorted_iter_ago[:n_chaos_check]}\n")
			if self.dynamic_time_step:
				self.adjust_time_step(factor = 0.6)
			return
		# Detect periodic orbits of period > 1 by calculating GCD of last
		# n_orbit_check values
		if len(history) >= n_orbit_check:
			orbit_period = np.gcd.reduce(sorted_iter_ago[:n_orbit_check])
			# Check if GCD > 1 and if values are not "too" large
			if orbit_period > 1 and min(sorted_iter_ago[:n_orbit_check]) <= n_orbit_check:
				self.message(f"Periodic orbit detected, period {orbit_period}: {sorted_iter_ago[:n_orbit_check]}\n")
				if self.dynamic_time_step:
					self.adjust_time_step(factor = 1.0 / orbit_period)
				return
+
	def estimate_convergence(self, set_status = True):
		"""Estimate if and when convergence is likely, from the history of diffs.

		Arguments:
		set_status   True or False. If True, store the resulting status code
		             in self.status.

		Returns:
		status   Integer. 2 if convergence is probable with more iterations,
		         3 if convergence cannot be estimated or is unlikely.
		"""
		if self.n_it == 1:
			sys.stderr.write("Warning (SelfConSolver.estimate_convergence): No convergence after single iteration. Not enough data to estimate necessary number of iterations.\n")
			status = 3
		else:
			# NOTE(review): the convergence rate is judged with the max norm,
			# regardless of diff_norm_method -- confirm this is intended.
			diff_val = np.amax(np.abs(self.diffs), axis = 1)
			# Ratio of successive diff norms (contraction factor per step)
			diff_factors = diff_val[1:] / diff_val[:-1]
			# Worst (largest) contraction factor of the last <= 5 steps
			max_factor = np.amax(diff_factors[-min(self.n_it, 5):])
			if max_factor < 0.95:
				diff = diff_val[-1]
				# Extrapolate geometrically: accuracy ~ diff * mean_factor^n
				est_iterations = self.n_it + int(np.ceil(np.log(self.target_accuracy / diff) / np.mean(np.log(diff_factors))))
				sys.stderr.write(f"Warning (SelfConSolver.estimate_convergence): Convergence is probable after approximately {est_iterations} iterations.\n")
				status = 2
			else:
				sys.stderr.write("Warning (SelfConSolver.estimate_convergence): Convergence is unlikely even after many iterations.\n")
				status = 3
		if set_status:
			self.status = status
		return status
+
+	def next_iter(self):
+		"""Check status and initialize next iteration if needed.
+
+		Return:
+		cont   True or False. Whether loop needs to be continued.
+		"""
+		if not self.check_status():
+			raise SelfConError(f"Aborted (status {self.status}).\n")
+		converged = self.check_convergence()
+		self.check_history()
+		if converged:
+			self.convergence_count += 1
+			self.message(f"Consecutive convergences {self.convergence_count}/{self.cons_conv}.\n")
+		else:
+			self.convergence_count = 0
+		if self.n_it < self.min_iterations:
+			if converged:
+				self.message("Converged, but minimal number of iterations not yet reached.\n")
+			return True
+		elif self.n_it >= self.max_iterations:
+			self.message("Maximum number of iterations reached.\n")
+			return False  # not an exception
+		elif self.convergence_count >= self.cons_conv:
+			self.message(f"Converged after {self.n_it} iterations.\n")
+			return False  # not an exception
+		return True
+
+	def finalize(self):
+		"""Finalize by writing densities"""
+		if not self.check_status():
+			self.message(f"Aborted (status {self.status}).\n")
+		converged = self.check_convergence()
+		if self.n_it >= self.max_iterations:
+			self.estimate_convergence()
+		elif converged:
+			self.status = 0
+		if self.status <= self.acceptable_status:
+			self.update_density(finalization = True)
+			if len(self.vz) < len(self.pdensz):
+				self.update_potential()
+				self.apply_diff()
+
+		# clear eigenvalue data
+		if self.eidata and not self.keep_eidata:
+			del self.eidata
+			self.eidata = None
+
+	def run(self):
+		"""Run the iterative loop"""
+		if self.status >= 0:
+			sys.stderr.write("ERROR (SelfConSolver.run): Calculation has already been run.\n")
+			return
+		try:
+			while self.next_iter():
+				self.do_iteration()
+		except SelfConError as ex:
+			sys.stderr.write(f"ERROR (SelfConSolver.run): {ex}\n")
+			sys.stderr.write(f"ERROR (SelfConSolver.run): Calculation failed (SelfConError; status {self.status}).\n")
+			if self.debug:
+				raise
+		except KeyboardInterrupt:
+			sys.stderr.write("ERROR (SelfConSolver.run): Interrupt.\n")
+			if self.status < 1:
+				self.status = 1
+			if self.debug:
+				raise
+		except Exception as ex:
+			sys.stderr.write("ERROR (SelfConSolver.run): An exception occurred during self-consistent calculation.\n")
+			raise
+		self.finalize()
+
+	def get_potential(self):
+		"""Return potential"""
+		if self.status > self.acceptable_status:
+			sys.stderr.write("ERROR (SelfConSolver.get_potential): Calculation failed.\n")
+			return None
+		return None if len(self.vz) == 0 else self.vz[-1]
+
+	def get_densityz(self, qdens = False, electrons = False, holes = False, bg = False):
+		"""Return density
+
+		Arguments:
+		qdens          True or False. If True, return charge density instead of
+		               particle density.
+		electrons      True or False. Whether to include electrons.
+		holes          True or False. Whether to include holes.
+		background     True or False. Whether to return background density. This
+		               is the sum of bg and offset densities
+		"""
+		if self.status > self.acceptable_status:
+			sys.stderr.write("ERROR (SelfConSolver.get_densityz): Calculation failed.\n")
+			return None
+		pdensz = None if len(self.pdensz) == 0 else self.pdensz[-1]
+		factor = -1 if qdens else 1
+		if bg:
+			if electrons or holes:
+				ValueError("If bg is True, electrons, holes must be both False")
+			if self.pdensz_bg is None or self.pdensz_offset is None:
+				return None
+			else:
+				return factor * (self.pdensz_bg + self.pdensz_offset)
+		elif electrons and holes:
+			return None if pdensz is None else factor * pdensz
+		elif electrons:
+			return None if self.pdensz_e is None else factor * self.pdensz_e
+		elif holes:
+			return None if self.pdensz_h is None else factor * self.pdensz_h
+		else:
+			raise ValueError("At least one of electrons, holes, bg must be True")
+
+	def get_densityz_dict(self, qdens = False):
+		"""Wrapper for get_densityz() that returns a dict"""
+		result = {
+			'total': self.get_densityz(qdens=qdens, electrons=True, holes=True),
+			'e': self.get_densityz(qdens=qdens, electrons=True),
+			'h': self.get_densityz(qdens=qdens, holes=True),
+			'bg': self.get_densityz(qdens=qdens, bg=True),
+		}
+		return {k: v for k, v in result.items() if v is not None}
+
class SelfConSolverFullDiag(SelfConSolver):
	"""SelfConSolver subclass that implements a full diagonalization approach.

	Unlike SelfConSolver, this class sums over all conduction band states as to
	determine densityz at the CNP (charge neutrality point).
	"""

	def __init__(self, *args, **kwds):
		"""Initialize solver; precompute the conduction-band offset density and
		automatically choose the diagonalization parameters (neig, target
		energy) from a one-off (almost) full diagonalization at k = 0.

		All arguments are forwarded to SelfConSolver.__init__().
		"""
		super().__init__(*args, **kwds)

		# Calculate offset density from all conduction subbands
		# TODO: Implement for mixed kB-grid
		kgrid = self.kbs.get_grid()
		# Area density
		# NOTE(review): the factor 2 presumably counts two conduction subbands
		# per z layer — confirm against the band model.
		n_bands_CB = 2 * self.params.nz
		self.n_offset_CB = -n_bands_CB * kgrid.volume() / (4 * np.pi**2)
		# Offset per unit length in z (area density divided by total thickness)
		self.n_offset_CB_vol = self.n_offset_CB / (self.params.nz * self.params.zres)

		if not any(v.zero() for v in kgrid):
			sys.stderr.write("Warning (SelfConSolverFullDiag): Result is unreliable if the momentum grid does not contain k = 0.\n")

		# Automatically decide how many eigenvalues to calculate and what
		# target energy to use. We could also use an eigensolver that always
		# finds the n largest eigenvalues instead.
		# Do a full diagonalization with (almost) all eigenvalues
		if 'verbose' in sys.argv:
			print("Finding targetenergy and determining number of eigenvalues to calculate.")

		modelopts_k0 = self.modelopts.copy()
		# NOTE(review): neig formula and the large target energy (10000) appear
		# to be heuristics for capturing (almost) the full spectrum — confirm.
		modelopts_k0["neig"] = (self.params.norbitals - 4) * self.params.nz - 3
		modelopts_k0["energy"] = 10000
		modelopts_k0["solver"] = dsolv.solverconfig(self.num_cpus, modelopts_k0)

		# Run in separate process to return GPU to initial condition
		# to avoid issues with cupy solver.
		with mp.Pool(1) as pool:
			ddp_k0 = pool.apply(diag.hz_k0, (self.params,), modelopts_k0)
			pool.close()
			pool.join()

		# Calculate all conduction band subbands and a few more bands
		self.modelopts["neig"] = 2 * self.params.nz + 100
		self.modelopts["energy"] = int(ddp_k0.eival.max())
		self.modelopts["solver"] = dsolv.solverconfig(self.num_cpus, self.modelopts)

		if 'verbose' in sys.argv:
			print(f"Using a targetenergy of {self.modelopts['solver'].targetval} and {self.modelopts['neig']} eigenvalues for the selfcon iterations.")

	def diagonalize(self):
		"""Diagonalize the Hamiltonian on the full k/B grid and assign band indices.

		Raises:
		ValueError if the potential has not been initialized.
		"""
		if len(self.vz) < 1:
			raise ValueError("SelfConSolver potential has not been initialized")

		modelopts = self.modelopts.copy()
		modelopts['pot'] = self.vz[-1]
		# Observables are not needed during the self-consistent loop
		if 'obs' in modelopts:
			del modelopts['obs']
		if 'dimful_obs' in modelopts:
			del modelopts['dimful_obs']

		modelopts['return_eivec'] = True
		modelopts["params"] = self.params

		# Diagonalization over grid
		self.eidata = DiagData([DiagDataPoint(kb[0], paramval=kb[1], grid_index=i) for i, kb in enumerate(self.kbs)], grid=self.kbs.get_grid())
		self.eidata.diagonalize(ModelMomentum2D(modelopts), modelopts["solver"])

		# Set all band indices to be negative
		# This simple bandalign algorithm works as we always know the
		# largest modelopts["neig"] eigenvalues of the Hamiltonian.
		for ddp in self.eidata:
			ddp.sort_by_eival(inplace=True)
			ddp.bindex = np.arange(-modelopts["neig"], 0)

	def update_density(self, finalization = False):
		"""Update density from diagonalization data.

		Arguments:
		finalization   True or False. If True, additionally split the density
		               into electron and hole contributions.

		Raises:
		ValueError if diagonalize() has not been called before.
		SelfConError if the chemical potential cannot be determined.
		"""
		if self.eidata is None:
			raise ValueError("No diagonalization data. Use diagonalize() before update_density().")
		# In the following, the argument 'cardens' invokes a high-precision
		# calculation of the chemical potential; it is "hidden" in the output 'ef'.
		densitydata = integrated_dos(self.eidata, self.erange, self.params, broadening = self.broadening)
		densitydata.strategy_no_e0 = 'ignore'

		# Offset IDOS by a uniform contribution from all conduction subbands as we
		# set CNP to be above all states.
		densitydata = densitydata.offset(n_offset=self.n_offset_CB)

		chem_pot = densitydata.energy_at_idos(self.cardens, save_as = 'ef')
		self.special_energies = densitydata.special_energies

		if 'verbose' in sys.argv:
			print(f"DEBUG mu:{chem_pot=}")

		if chem_pot is None:
			self.status = 4
			raise SelfConError("Unable to determine Fermi energy/chemical potential.\n")

		# Calculate offset density
		if self.n_offset is not None:
			chem_pot_offset = densitydata.energy_at_idos(self.n_offset)
			self.pdensz_offset = densityz(
				self.eidata, chem_pot_offset, self.erange,
				self.params.nz, dz = self.params.zres, norb=self.params.norbitals,
				broadening=self.broadening, electrons=True, holes=True
			)

		# In older versions, the result of densityz() was explicitly multiplied
		# by 1 / (2 pi)^2. This factor has been dropped here, as it is now
		# applied internally in densityz_energy(), a function called by
		# densityz().
		pdensz = densityz(
			self.eidata, chem_pot, self.erange, self.params.nz,
			dz = self.params.zres, norb = self.params.norbitals,
			broadening = self.broadening, electrons=True, holes=True
		) - self.n_offset_CB_vol
		self.pdensz.append(pdensz)

		if finalization:
			# Split into electrons and holes; by the sign convention used here,
			# positive values are electron-like, negative values hole-like
			self.pdensz_e = np.maximum(pdensz, 0)
			self.pdensz_h = np.minimum(pdensz, 0)

		# Debug output
		zval = self.params.zvalues_nm()
		self.write_to_tempfile("scdens.csv", zval, -pdensz)  # qdensz = -pdensz
		if 'verbose' in sys.argv:
			print("Density: rho =", np.sum(pdensz) * self.params.zres)
			print(pdensz[:8])
			print("...")
			print(pdensz[-8:])
			print("Density antisymmetry:", np.amax(np.abs(pdensz - pdensz[::-1])))
+
+
+class SelfConSolverLL(SelfConSolver):
+	"""Container for self-consistent potential solver, with input, output, and methods
+
+	Attributes (input as __init__ arguments):
+	kbs                ZippedKB instance. The grid of k and B values.
+	params             PhysParams instance.
+	target_accuracy    Float. Target accuracy to be reached for a successful
+	                   calculation (status 0)
+	time_step          Float (> 0). Initial time step for abstract time. This is
+	                   equivalent to the weight of the current iteration result
+	                   entering into the new potential value. Values > 1 are
+	                   allowed, but typically not useful.
+	min_iterations     Integer. Minimum number of iterations that are calculated
+	                   in any case.
+	max_iterations     Integer. Maximum number of iterations before calculation
+	                   is aborted.
	num_cpus           Integer. Number of workers to use during parallelized
	                   diagonalization processes.
	erange             Array or tuple. The energy range.
	cardens            Float or None. The desired carrier density, measured as
	                   particle density (positive for electrons, negative for
	                   holes).
	outputid           String or None. User-set suffix for (temporary) output
	                   files. If None, do not insert anything.
+	opts               A dict instance. General options.
+	modelopts          A dict instance. Model options.
+	bandalign_opts     A dict instance. Options for band alignment.
+
+	Attributes (from configuration values):
+	acceptable_status  Integer, 0-4. Maximum acceptable status level:
+	                   0: Successful
+	                   1: Calculation skipped or aborted
+	                   2: Did not converge, but convergence is likely after more
+	                   iterations
+	                   3: Did not converge, convergence can not be estimated
+	                   4: Failed critically
+	broadening         BroadeningFunction or MultiBroadening instance.
+	cons_conv          Integer > 0. Consecutive converged iterations needed
+	                   for result to be considered "truly" converged.
+	check_chaos_steps  Integer > 0. The number of iterations used for checking
+	                   chaotic behaviour.
+	check_orbit_steps  Integer > 0. The number of iterations used for checking
+	                   periodic orbits.
+	diff_norm_method   String. Choose which norm is used to get a measure for
+	                   how far away the last potential difference is from
+	                   convergence. Currently implemented are 'max' and 'rms'.
+	debug              True or False. Enable or disable debug mode. In debug
+	                   mode, write temporary files and re-raise SelfConError and
+	                   KeyboardInterrupt within the iteration loop. This is
+	                   useful to get traceback (for debugging). Other exceptions
+	                   are always re-raised.
+	dynamic_time_step  True or False. Whether the time_step for the self-
+	                   consistent calculation is adapted automatically (True)
+	                   between iterations or a fixed value (False; default).
+	min_time_step      Float between 0 and 1. The lower limit for time_step in
+	                   dynamic time step adjustment.
+	potential_to_zero  True or False. Whether to subtract the average value from
+	                   the potential at each iteration.
+	out                Stream. Where to write the status messages. By default
+	                   equal to sys.stderr.
+
+	Attributes (set at initialization and iteration):
+	n_it               Integer. Current iteration number (read-only).
+	convergence_count  Integer. Consecutive converged iterations so far
+	                   (read-only).
+	status             Integer. Current status; see attribute acceptable_status
+	                   for a list. This also can have the additional value -1,
+	                   meaning the calculation has not been completed.
+	times              Array of floats. Cumulated (abstract) time at every
+	                   iteration.
+	diffsolver         Callable. The differential equation solver.
+	epsilonz           1-dim array. Values of the dielectric constant as
+	                   function of z
+	bc                 A dict instance. The boundary conditions.
+	vz                 List of 1-dim arrays. Potentials after each iteration
+	                   step. The array axis represents the z dependence.
+	diffs              1-dim array. Potential difference as given by using
+	                   diagonalize(), update_density() and solve_potential().
+	                   Depending on the differential solver and time_step,
+	                   diffs[i] may differ from vz[i] - vz[i-1].
+	pdensz             List of 1-dim arrays. Particle densities after each
+	                   iteration step. The array axis represents the z
+	                   dependence. Electron-like states count as positive,
+	                   hole-like states as negative.
+	densz_bg           1-dim array or None. Background density as function of z.
+	                   This density is added to the density coming from the band
+	                   structure prior to solving the potential. In other words,
+	                   this is the density without any filled state.
+	eidata             DiagData instance. The most recent diagonalization
+	                   result.
+	tempfiles          List of strings. File names for temporary files that have
+	                   already been used.
+	min_time_step      Float. Defines the minimum time step reachable, depending
+	                   on cardens and valence band contribution at the electro-
+	                   chemical potential. Set after first iteration. Only
+	                   relevant if dynamic_time_step is set to True. (read only)
+
+	Attributes (set at convergence):
+	densz_e            1-dim array or None. Electron density as function of z.
+	                   Set only after convergence.
+	densz_h            1-dim array or None. Hole density as function of z. Set
+	                   only after convergence.
+	special_energies   A dict instance. For ef0, ef, etc.; see density.py.
+
+	Methods:
+	next_iter          Method to check abortion criteria and output status
+	                   information
+	check_acceptable_status  Check whether the current status is acceptable.
+	"""
+
	def __init__(
			self, kbs, params, target_accuracy=0.01, time_step=0.9,
			max_iterations=10, min_iterations=0, num_cpus=1,
			erange=None, cardens=None, weight=None, outputid=None,
			opts=None, modelopts=None, bandalign_opts=None,
			ll_mode=None, ll_max=None, h_sym=None):
		"""Initialize self-consistent potential solver for Landau levels.

		Arguments:
		See Attribute information for class. Additionally:
		ll_mode   String or None. Landau-level calculation mode; values 'sym'
		          and 'full' trigger a rebuild of the symbolic Hamiltonian in
		          init_potential() (see also lldiag.hll()).
		ll_max    Integer or None. Maximum Landau-level index.
		h_sym     SymbolicHamiltonian instance or None. Symbolic Hamiltonian.
		"""
		super().__init__(
			kbs, params, target_accuracy=target_accuracy,
			time_step=time_step, max_iterations=max_iterations,
			min_iterations=min_iterations, num_cpus=num_cpus, erange=erange,
			cardens=cardens, weight=weight, outputid=outputid, opts=opts,
			modelopts=modelopts, bandalign_opts=bandalign_opts
		)
		self.ll_mode = ll_mode
		self.ll_max = ll_max
		self.h_sym = h_sym
		# Convergence is tracked per B point (a scalar in the parent class)
		self.convergence_count = np.zeros(len(self.kbs), dtype=int)
		self.diag_selector = [True] * len(self.kbs)  # for skipping converged calculation points
		# Index of the B value closest to zero
		# NOTE(review): assumes kbs.b.values[0] holds the relevant (signed) B
		# component array — confirm against VectorGrid internals.
		self.b0_idx = np.argmin(np.abs(self.kbs.b.values[0]))
		# Each B point gets its own time step and abstract time
		self.time_step = np.array([self.time_step] * len(self.kbs))
		self.times = [np.zeros(len(self.kbs), dtype=float)]
		if get_config_bool('selfcon_erange_from_eivals'):
			# Overwrite erange to use full energy range from
			# eigenvalues of first diagonalization result
			self.erange = None

		if not get_config_bool('selfcon_ll_use_broadening'):
			# Don't use broadening
			self.broadening = None

		if self.broadening is not None:
			# Apply width dependence
			bzval = kbs.b.get_values('bz')
			self.broadening.apply_width_dependence(bzval, opts['broadening_dep'], in_place=True)
+
	def init_potential(
			self, potential = None, cardens = None, n_bg = None, n_offset = None,
			n_surf = None, d_surf = 8.0, **kwds):
		"""Initialize the potential.

		Arguments:
		potential   1-dim array, 2-dim array, or None. If an array, take the
		            potential from this array, for example pre-loaded from a
		            file; a 1-dim array is replicated for all B values.
		            Otherwise, initialize potential using **kwds if the
		            configuration value selfcon_use_init_density is True and
		            set it to zero if it is False.
		cardens     Float or None. The desired carrier density. If None, use the
		            carrier density set by SelfConSolver.__init__().
		n_bg        Float or None. The background density (uniformly distributed
		            in z) that contributes to the total carrier density
		            distribution used in solve_potential() but not to cardens.
		n_offset    Float or None. Offset carrier density which contributes to
		            cardens. The offset carrier distribution is calculated from
		            the solved Hamiltonian in each iteration and is subtracted
		            from the total carrier density distribution used in
		            solve_potential().
		n_surf      Number, 2-tuple, or None. If numeric, apply this surface
		            density (in nm^-2) to both bottom and top surface in the
		            well layer. If a 2-tuple, apply two different densities to
		            bottom and top layer, respectively. If one of the two values
		            is None, that respective surface is not considered, i.e.,
		            the bulk extends completely to the interface of the well
		            layer. The value (None, None) is not permitted.
		d_surf      Number. Thickness of the surface layer(s) in nm.
		**kwds      The keyword arguments n_depletion, l_depletion, v_inner,
		            v_outer, efield. These are passed to the function
		            init_potential (potential.py). Note that even if potential
		            is an array, the keyword arguments are used to determine
		            carrier density, boundary conditions, etc.

		Raises:
		SelfConError if an input potential array has more than 2 dimensions.
		NotImplementedError if potential is neither None nor an ndarray.
		"""
		verbose = 'verbose' in sys.argv
		nz = self.params.nz
		dz = self.params.zres
		zval = self.params.zvalues_nm()
		cardens = self.cardens if cardens is None else cardens
		cardens, qdensz, qdensz_bg, bc = init_potential(self.params, cardens = cardens, verbose = verbose, **kwds)
		self.pdensz_offset = np.zeros_like(qdensz)
		if cardens is None:
			# Even after init_potential() cardens is still None.
			# Handle it like a carrier density of 0 e/nm^2
			cardens = 0
		if n_bg is not None:
			# background density
			zif1, zif2 = self.params.well_z(strict=True)  # should always work, since this is already checked in init_potential()
			qdensz_bg = np.zeros(nz, dtype=float)
			qdensz_bg[zif1: zif2 + 1] -= n_bg / dz / ((zif2 + 1) - zif1)
		if n_offset is not None:
			# density offset
			# calculates pdensz_offset in each iteration in self.update_potential()
			# which will be subtracted from pdensz for solve_potential()
			cardens += n_offset
			self.n_offset = n_offset
		# NOTE(review): the charge density computed by init_potential() above is
		# reset to zero here — confirm this is intended for the LL solver.
		qdensz = np.zeros_like(qdensz)
		if potential is None:
			if get_config_bool('selfcon_use_init_density'):
				vz = solve_potential(
					zval, qdensz + qdensz_bg, self.epsilonz, dz = dz,
					verbose = verbose, well_center = self.well_center, **bc
				)
				# NOTE(review): the solved vz is immediately replaced by zeros
				# here, so the solve_potential() result is discarded — verify.
				vz = np.repeat(np.zeros_like(vz)[np.newaxis,:], len(self.kbs.b), axis=0)
			else:
				# One zero potential (length nz) per magnetic-field value
				vz = np.repeat(np.zeros_like(qdensz)[np.newaxis,:], len(self.kbs.b), axis=0)
		elif isinstance(potential, np.ndarray):
			if potential.ndim == 1:
				# Single potential: replicate for all B values
				vz = np.repeat(potential[np.newaxis,:], len(self.kbs.b), axis=0)
			elif potential.ndim == 2:
				vz = potential
			else:
				raise SelfConError("Input potential has wrong shape.\n")
			# Consistency checks
			if not np.all([bc.test_potential(zval, v, verbose = True) for v in vz]):
				sys.stderr.write("Warning (SelfConSolver.init_potential): Input potential is incompatible with the boundary conditions.\n")
			cardens_from_vz = np.array([cardens_from_potential(v, self.epsilonz, dz = dz) for v in vz])
			# NOTE(review): one-sided comparison (no abs); deviations in the
			# negative direction pass silently — confirm intended.
			if cardens is not None and np.any((cardens_from_vz - cardens) > 1e-6):
				sys.stderr.write(f"Warning (SelfConSolver.init_potential): Carrier density from input potential ({cardens_from_vz}) differs from carrier density from direct input or from boundary conditions ({cardens:4g}).\n")
		else:
			raise NotImplementedError
		self.cardens = cardens
		# NOTE(review): this appends an array of shape (nz, n_B) (z axis first),
		# whereas vz uses (n_B, nz); update_potential() treats pdensz[-1] as
		# (n_B, nz) — check for an axis-order inconsistency.
		self.pdensz.append(-np.repeat(qdensz[:,np.newaxis], len(self.kbs.b), axis=1))
		if n_surf is not None:  # Experimental: surface state background density
			self.pdensz_bg = densityz_surface_states(self.params, n_surf, d_surf)
		else:
			self.pdensz_bg = -qdensz_bg
		self.bc = bc
		self.vz.append(vz)
		self.write_to_tempfile('scpot.csv', zval, vz)
		self.write_to_tempfile('scdens.csv', zval, qdensz)
		if verbose:
			print("Pot:")
			print(vz)
			print("Potential antisymmetric part:")
			print(vz - vz[::-1])
			print("Potential antisymmetry")
			print(np.amax(np.abs(vz - vz[::-1])))

		if self.ll_mode in ['sym', 'full']:
			# Rebuild the symbolic Hamiltonian used by the LL diagonalization;
			# keys irrelevant for the symbolic form are stripped first
			modelopts_hsym = self.modelopts.copy()
			for k in ['obs', 'dimful_obs', 'energy', 'neig', 'cpu', 'pot', 'obs_prop', 'return_eivec', 'custom_interface_length']:
				if k in modelopts_hsym:
					del modelopts_hsym[k]
			self.h_sym = SymbolicHamiltonian(
				hz_sparse_split, (self.params,), modelopts_hsym, hmagn=True
			)
+
	def diagonalize(self):
		"""Diagonalize the LL Hamiltonian for all B values not yet converged.

		Band indices are determined adiabatically at the B value closest to
		zero and propagated to the grid. For B values that are already
		converged (diag_selector False) the previous result is reused.

		Raises:
		ValueError if the potential has not been initialized.
		"""
		if len(self.vz) < 1:
			raise ValueError("SelfConSolver potential has not been initialized")

		modelopts = self.modelopts.copy()
		# Observables are not needed during the self-consistent loop
		if 'obs' in modelopts:
			del modelopts['obs']
		if 'dimful_obs' in modelopts:
			del modelopts['dimful_obs']

		# temporarily store previous result
		eidata_old = self.eidata

		# Determine band indices by slowly increasing the potential. The
		# following function applies band alignment to the Hamiltonian
		# H = H0 + alpha V, where H0 is the Hamiltonian without potential, V is
		# the potential, and alpha is increased in small steps from 0 to 1. The
		# result is the DiagDataPoint for alpha = 1, which has its band indices
		# set.
		eidata_k0 = bandindices_adiabatic_ll(
			self.ll_mode, self.ll_max, self.h_sym, self.params,
			pot=self.vz[-1][self.b0_idx], num_cpus=self.num_cpus,
			modelopts=modelopts, bandalign_opts=self.bandalign_opts
		)

		# Diagonalization over grid
		modelopts['return_eivec'] = True

		# Only take b-values which aren't converged yet; not sure if this always works...
		bs = VectorGrid(
			self.kbs.b.var[0],
			self.kbs.b.get_values('bz')[self.diag_selector],
			astype=self.kbs.b.vtype,
			prefix=self.kbs.b.prefix
		)

		eidata_new = lldiag.hll(
			self.ll_mode, bs, self.ll_max, self.h_sym, self.params,
			modelopts=modelopts, list_kwds={'pot': self.vz[-1][self.diag_selector]},
			description=f"SC Hartree iteration {self.n_it}",
			num_processes=self.num_cpus)
		eidata_new.set_char(eidata_k0)
		bandindices(eidata_new, input_data=eidata_k0, params=self.params, e0 = eidata_k0.get_eival0())

		# Get full erange for DOS calculations once
		if self.erange is None:
			if self.min_eres < 5000 and self.modelopts['neig'] > 200:
				sys.stderr.write(
					"Warning (SelfConSolverLL.diagonalize): For selfcon_erange_from_eivals=true and neig > 200, the setting selfcon_energy_points < 5000 may lead to incorrect results. Consider increasing amount of energy points by setting the configuration value selfcon_energy_points. Note that a too high value can drastically increase calculation time).\n"
				)
			all_eivals = np.array(list(eidata_new.get_eival_by_bindex().values()))
			erange = [np.nanmin(all_eivals), np.nanmax(all_eivals)]
			self.erange = erange_from_target_eres(erange, self.min_eres)
			sys.stderr.write("Energy range from all eigenvalues: ({:.2f}, {:.2f}, {})\n".format(*self.erange))

		# Put in previous results for skipped calculations
		if eidata_old is not None and len(eidata_new) < len(eidata_old):
			bval_new = eidata_new.get_paramval()
			ddps = []
			for ddp_old in eidata_old:
				# Wrap plain numbers into Vector so the index lookup below works
				bval_old = Vector(ddp_old.paramval, astype='z') if isinstance(ddp_old.paramval, (float, np.floating, int, np.integer)) else ddp_old.paramval
				new_idx = bval_new.index(bval_old, acc=1e-10)
				ddps.append(ddp_old if new_idx is None else eidata_new[new_idx])
			self.eidata = DiagData(ddps, grid=self.kbs.b)
		else:
			self.eidata = eidata_new

		self.write_bandindex_tempfile()
+
	def update_density(self, finalization=False):
		"""Update density from diagonalization data.

		Arguments:
		finalization   True or False. If True, compute electron and hole
		               densities separately and return without appending to
		               pdensz.

		Raises:
		ValueError if diagonalize() has not been called before.
		SelfConError if the chemical potential cannot be determined.
		"""
		if self.eidata is None:
			raise ValueError(
				"No diagonalization data. Use diagonalize() before update_density().")
		# In the following, the argument 'cardens' invokes a high-precision
		# calculation of the chemical potential; it is "hidden" in the output 'ef'.
		densitydata = integrated_dos_ll(
			self.eidata, self.erange, self.params, broadening=self.broadening
		)

		# Chemical potential per B value at the requested carrier density, and
		# at zero density (charge neutrality)
		chem_pot = densitydata.energy_at_dos_ll(self.cardens, subdiv=1)[1].flatten()
		ef0 = densitydata.energy_at_dos_ll(0., subdiv=1)[1].flatten()
		self.special_energies = {'ef': chem_pot, 'ef0': ef0}

		# NOTE(review): flatten() returns an array, never the None singleton, so
		# this guard may be ineffective — confirm what a failure looks like here.
		if chem_pot is None:
			self.status = 4
			raise SelfConError("Unable to determine Fermi energy/chemical potential.\n")

		# For k-dependence the factors 1 / (2 pi)^2 ensure that one occupied state
		# "corresponds" to a charge density of e / A_BZ.
		# For B-dependence this is ensured by multiplication with degeneracy/area
		# (at the moment in int_dos_by_band())

		# For finalization, calculate electron and hole density, then exit
		if finalization:
			# Set missing data to zero for now
			self.pdensz_e = densityz_ll(
				self.eidata, chem_pot, self.erange, self.params.nz,
				dz=self.params.zres, norb=self.params.norbitals,
				broadening=self.broadening, electrons=True, holes=False
			)
			self.pdensz_h = densityz_ll(
				self.eidata, chem_pot, self.erange, self.params.nz,
				dz=self.params.zres, norb=self.params.norbitals,
				broadening=self.broadening, electrons=False, holes=True
			)
			return

		pdensz = densityz_ll(
			self.eidata, chem_pot, self.erange, self.params.nz,
			dz=self.params.zres, norb=self.params.norbitals,
			broadening=self.broadening, electrons=True, holes=True
		)
		self.pdensz.append(pdensz)

		# Debug output
		zval = self.params.zvalues_nm()
		# NOTE(review): only pdensz[-1] (presumably the last B point) is written
		# here, unlike the parent class which writes the full density — confirm.
		self.write_to_tempfile("scdens.csv", zval, -pdensz[-1])  # qdensz = -pdensz
		if 'verbose' in sys.argv:
			print("Density: rho =", np.sum(pdensz) * self.params.zres)
			print(pdensz[:8])
			print("...")
			print(pdensz[-8:])
			print("Density antisymmetry:", np.amax(np.abs(pdensz - pdensz[::-1])))
+
+	def update_potential(self):
+		"""Update potential (difference) from density"""
+		if len(self.diffs) + 1 >= len(self.pdensz):  # array diffs is always one shorter
+			raise ValueError("Diff has already been updated. First use update_density() (again).")
+
+		qdensz = -(self.pdensz[-1] + self.pdensz_bg[np.newaxis, :])
+		zval = self.params.zvalues_nm()
+
+		verbose = 'verbose' in sys.argv
+		# Handle each B point individually
+		vz = np.array([
+			solve_potential(
+				zval, densz_i, self.epsilonz, dz=self.params.zres,
+				verbose=verbose, **self.bc
+			) for densz_i in qdensz
+		])
+
+		if verbose:
+			print("Potential antisymmetry:", np.amax(np.abs(vz[0] - vz[0][::-1])))
+		if self.potential_to_zero:  # put potential average at zero
+			vz = vz - (np.sum(vz, axis=1) / vz.shape[1])[:, np.newaxis]
+
+		# Calculate potential step and apply diffsolver to get the new potential
+		self.diffs.append(vz - self.vz[-1])
+
+		# Debug output
+		for i, vz_B in enumerate(vz):
+			self.write_to_tempfile(f"scpot_B_{self.kbs.b.get_values('bz')[i]}.csv", zval, vz_B)
+
+	def get_diff_norm(self, arr=None):
+		"""Calculate a measure for convergence from last diff, depending on diff_norm_method.
+
+		Arguments
+		arr   Numpy array or None. If None, use the last entry in self.diffs. If
+		      arr is set, use that array instead.
+		"""
+		# last axis is z-dimension
+		if arr is None:
+			arr = self.diffs[-1]
+		if self.diff_norm_method == 'max':  # maximum (a.k.a. sup or L-infinity norm)
+			return np.amax(np.abs(arr), axis=-1)
+		elif self.diff_norm_method == 'rms':  # root-mean-square (L2 norm)
+			return np.sqrt(np.mean(arr ** 2, axis=-1))
+		else:  # not implemented
+			raise NotImplementedError(
+				f"Diff norm {self.diff_norm_method} not implemented")
+
+	def get_distances(self, arr=None):
+		"""Get distances of the last value of the array (vz) to the previous ones
+
+		Argument:
+		arr   Numpy array or None. If None, use self.vz. If arr is set, use that
+		      array instead.
+		"""
+		arr = np.asarray(self.vz) if arr is None else np.asarray(arr)
+		if arr.ndim != 3:
+			raise ValueError("Array must be 3 dimensional")
+		return self.get_diff_norm(arr - arr[-1])  # this is now simplified due to sum over specific axis in get_diff_norm()
+
+	def adjust_time_step(self, factor=None, index=None, offset=None):
+		"""Adjust single time step value to absolute or relative value.
+		Adapted from SelfConSolver-Version to be compatible with individual time
+		steps for each B-point.
+
+		The result is time_step_old * factor + offset. This value is clipped
+		between self.min_time_step and 1.0.
+
+		Arguments:
+		factor   Float > 0.
+		index    Integer. Index for value in time step array (same as B-point
+		         index).
+		offset   Float between 0 and 1.
+
+		No return value
+		"""
+		if index is None:
+			self.message("Could not adjust time step. No index given.")
+			return
+		if factor is None:
+			factor = 1.0
+		elif factor <= 0.0:
+			raise ValueError("Argument factor must be > 0.")
+		if offset is None:
+			offset = 0.0
+		elif offset < 0.0 or offset > 1.0:
+			raise ValueError("Argument offset must be between 0 and 1")
+		self.time_step[index] = self.time_step[index] * factor + offset
+		self.time_step[index] = max(min(self.time_step[index], 1.0), self.min_time_step)
+		self.message(f"Adjusted time step = {self.time_step[index]:.4g}\n")
+		return
+
+	def check_convergence(self):
+		"""Check if calculation has converged"""
+		if len(self.diffs) == 0:
+			return [False] * len(self.kbs)  # not an exception
+		diff_val = self.get_diff_norm()
+		self.message(f"Accuracy reached so far: {np.array2string(diff_val, formatter={'float_kind': lambda x: f'{x:.2g}'})} meV.\n")
+		return diff_val < self.target_accuracy
+
	def check_history(self):
		"""Analyze which potentials in previous iterations lie closest (experimental)

		For each B-point, inspect the distances between the current potential
		and all previous ones in order to detect chaotic behaviour or periodic
		orbits of the iteration. If detected and self.dynamic_time_step is
		set, the time step for that B-point is reduced via adjust_time_step().
		"""
		if self.n_it < 1:
			return
		# Shortcuts
		n_chaos_check = self.check_chaos_steps
		n_orbit_check = self.check_orbit_steps
		# Obtain distances between current and previous potentials
		history_arr = self.get_distances()[:-1]
		# ToDo: Each B field could use its own time_step.
		# loop over B-values, thus swapaxes from (n_it, nB) -> (nB, n_it)
		for idx, history in enumerate(history_arr.swapaxes(0, 1)):
			if all(np.abs(history) < 1e-12) or self.convergence_count[idx] >= self.cons_conv:
				# skip B-value if all history is zero or already converged
				continue
			# Find closest previous iteration
			iter_min = np.argmin(history)
			iter_ago = len(history) - iter_min
			self.message(f"----- History check for B={self.kbs.b[idx]}T\n")
			self.message(f"Full history of distances d(V_current - V_i): {history}\n")
			self.message(f"Minimum at {iter_min} ({iter_ago} iteration steps ago)\n")
			# Rank iteration history by distance (entries are "iterations ago")
			sorted_iter_ago = len(history) - np.argsort(history)
			self.message(f"Iterations ago, sorted by distance: {sorted_iter_ago}\n")
			# Detect chaotic behaviour; this is the case if the first n values are
			# all (strictly) larger than n (n = n_chaos_check)
			if len(history) >= n_chaos_check and min(sorted_iter_ago[:n_chaos_check]) > n_chaos_check:
				self.message(f"Chaos detected: {sorted_iter_ago[:n_chaos_check]}\n")
				if self.dynamic_time_step:
					# Damp the iteration for this B-point
					self.adjust_time_step(factor = 0.6, index=idx)
				continue
			# Detect periodic orbits of period > 1 by calculating GCD of last
			# n_orbit_check values
			if len(history) >= n_orbit_check:
				orbit_period = np.gcd.reduce(sorted_iter_ago[:n_orbit_check])
				# Check if GCD > 1 and if values are not "too" large
				if orbit_period > 1 and min(sorted_iter_ago[:n_orbit_check]) <= n_orbit_check:
					self.message(f"Periodic orbit detected, period {orbit_period}: {sorted_iter_ago[:n_orbit_check]}\n")
					if self.dynamic_time_step:
						# Shrink the time step by the orbit period to break the cycle
						self.adjust_time_step(factor = 1.0 / orbit_period, index=idx)
+
	def estimate_convergence(self, set_status=True):
		"""Estimate whether and after how many iterations convergence is expected.

		Extrapolates the decrease of the successive difference norms over the
		last iterations and writes the estimate as a warning to stderr.

		Arguments:
		set_status   True or False. If True, store the resulting status code
		             in self.status.

		Returns:
		status   Integer. 2 if convergence appears probable, 3 if convergence
		         is unlikely or there is not enough data.
		"""
		if self.n_it == 1:
			sys.stderr.write(
				"Warning (SelfConSolver.estimate_convergence): No convergence after single iteration. Not enough data to estimate necessary number of iterations.\n")
			status = 3
		else:
			# ToDo: This is not properly implemented yet and won't give a correct estimate
			diff_val = np.amax(np.abs(self.diffs)[:, 0, :], axis=1)
			# Ratios of consecutive difference norms; values < 1 mean shrinking
			diff_factors = diff_val[1:] / diff_val[:-1]
			# Worst (largest) ratio over at most the last 5 iterations
			max_factor = np.amax(diff_factors[-min(self.n_it, 5):])
			if max_factor < 0.95:
				diff = diff_val[-1]
				# Geometric extrapolation to reach the target accuracy
				est_iterations = self.n_it + int(np.ceil(
					np.log(self.target_accuracy / diff) / np.mean(
						np.log(diff_factors))))
				sys.stderr.write(
					f"Warning (SelfConSolver.estimate_convergence): Convergence is probable after approximately {est_iterations} iterations.\n")
				status = 2
			else:
				sys.stderr.write(
					"Warning (SelfConSolver.estimate_convergence): Convergence is unlikely even after many iterations.\n")
				status = 3
		if set_status:
			self.status = status
		return status
+
	def next_iter(self):
		"""Check status and initialize next iteration if needed.

		Updates the per-B-point consecutive-convergence counters and the
		selector of B-points that still need to be diagonalized.

		Return:
		cont   True or False. Whether loop needs to be continued.
		"""
		if not self.check_status():
			raise SelfConError(f"Aborted (status {self.status}).\n")
		converged_arr = self.check_convergence()
		self.check_history()
		for idx, c in enumerate(converged_arr):  # check convergence for each B individually
			if c:
				self.convergence_count[idx] += 1
				self.message(f"Consecutive convergences for B={self.kbs.b[idx]}T: {self.convergence_count[idx]}/{self.cons_conv}.\n")
			else:
				# Reset: convergences must be consecutive to count
				self.convergence_count[idx] = 0
		if self.n_it < self.min_iterations:
			if any(converged_arr):
				self.message(f"B={self.kbs.b[converged_arr]} converged, but minimal number of iterations not yet reached.\n")
			return True
		elif self.n_it >= self.max_iterations:
			self.message("Maximum number of iterations reached.\n")
			return False  # not an exception
		elif all(self.convergence_count >= self.cons_conv):
			self.message(f"Converged after {self.n_it} iterations.\n")
			return False  # not an exception
		# Update selector for B-points to skip, because of convergence.
		# (saving calculation time)
		fully_converged = self.convergence_count >= self.cons_conv
		if self.debug:
			self.message(
				f"{np.count_nonzero(fully_converged)}"
				f"/{len(self.kbs)} B-points converged.\n"
			)
		self.diag_selector = np.invert(fully_converged)
		self.diag_selector[self.b0_idx] = True  # never skip calculation for B=0
		return True
+
	def finalize(self):
		"""Finalize by writing densities

		Sets the final status, performs the final density update (and a
		potential update if fewer potentials than densities are stored), and
		releases the eigenvalue data.
		"""
		if not self.check_status():
			self.message(f"Aborted (status {self.status}).\n")
		converged_arr = self.check_convergence()
		if self.n_it >= self.max_iterations:
			# Ran out of iterations: estimate how many would have been needed
			self.estimate_convergence()
		elif all(converged_arr):
			self.status = 0  # success
		if self.status <= self.acceptable_status:
			self.update_density(finalization = True)
			# If the potential lags behind the density, do one more update
			if len(self.vz) < len(self.pdensz):
				self.update_potential()
				self.apply_diff()

		# clear eigenvalue data (TODO: Make optional)
		if self.eidata:
			del self.eidata
			self.eidata = None
+
+
class SelfConSolverLLFullDiag(SelfConSolverLL):
	"""Self-consistent solver using (almost) full diagonalization in LL mode 'full'.

	The number of eigenvalues and the target energy are determined
	automatically in __init__ from a preliminary diagonalization at k = 0 and
	are then reused for all self-consistency iterations.
	"""
	def __init__(self, *args, **kwds):
		super().__init__(*args, **kwds)

		# TODO: The present implementation fails on the LL mode 'sym', because
		# the diagsolver for each LL does not get the correct number of
		# eigenvalues (neig). In the future, this issue shall be solved by
		# changing the diagsolver so that it considers the correct neig value
		# also for the lowest LLs (llindex -2, -1, 0).
		if self.ll_mode != 'full':
			raise NotImplementedError("The present implementation of SelfConSolverLLFullDiag supports LL mode 'full' only")

		# Automatically decide how many eigenvalues to calculate and what
		# target energy to use. We could also use an eigensolver that always
		# finds the n largest eigenvalues instead.
		# Do a full diagonalization with (almost) all eigenvalues
		if 'verbose' in sys.argv:
			print("Finding targetenergy and determining number of eigenvalues to calculate.")

		modelopts_k0 = self.modelopts.copy()
		# Note that only one of the CB subbands has a -1 LL
		modelopts_k0["neig"] = (self.params.norbitals - 4) * (self.ll_max + 1 + self.ll_max + 2)
		modelopts_k0["energy"] = 10000
		modelopts_k0["solver"] = dsolv.solverconfig(self.num_cpus, modelopts_k0)

		ddp_k0 = lldiag.hll_k0(self.ll_mode, self.ll_max, self.h_sym, self.params, modelopts_k0, description = "Calculating bands (k=0)...\n", return_eivec = True)

		# Calculate all conduction band LLs and a few more (at least 10 full subbands with LLs)
		self.modelopts["neig"] = (2 * self.ll_max + 3) * self.params.nz + 2 * (self.ll_max + 3) * 10
		# Use the largest eigenvalue at k=0 as target energy for the iterations
		self.modelopts["energy"] = int(ddp_k0.eival.max())
		self.modelopts["solver"] = dsolv.solverconfig(self.num_cpus, self.modelopts)

		if 'verbose' in sys.argv:
			print(f"Using a targetenergy of {self.modelopts['solver'].targetval} and {self.modelopts['neig']} eigenvalues for the selfcon iterations.")


	def diagonalize(self):
		"""Diagonalize the LL Hamiltonian for all B-points with the latest potential."""
		if len(self.vz) < 1:
			raise ValueError("SelfConSolver potential has not been initialized")

		modelopts = self.modelopts.copy()
		# Observables are not needed during the iteration; drop them
		if 'obs' in modelopts:
			del modelopts['obs']
		if 'dimful_obs' in modelopts:
			del modelopts['dimful_obs']

		# Update modelopts so we can use the Model framework
		modelopts['return_eivec'] = True
		modelopts['h_sym'] = self.h_sym
		modelopts['orbital_magn'] = False
		modelopts["ll_mode"] = self.ll_mode
		modelopts["ll_max"] = self.ll_max
		modelopts["params"] = self.params

		# Diagonalize at every B-point, using the most recent potential self.vz[-1]
		self.eidata = DiagData([DiagDataPoint(0, paramval=b, grid_index=i) for i, b in enumerate(self.kbs.get_grid())], grid=self.kbs.get_grid())
		self.eidata.diagonalize(ModelLL(modelopts), modelopts["solver"], {'pot': self.vz[-1]})

		# Set all band indices to be negative
		# This simple bandalign algorithm works as we always know the
		# largest modelopts["neig"] eigenvalues of the Hamiltonian.
		for ddp in self.eidata:
			ddp.sort_by_eival(inplace=True)
			ddp.bindex = np.arange(-modelopts["neig"], 0)

	def update_density(self, finalization=False):
		"""Update density from diagonalization data

		Arguments:
		finalization   True or False. If True, also store the density split
		               into the two sign components (pdensz_e / pdensz_h).

		Raises ValueError if diagonalize() has not been called before; raises
		SelfConError if the chemical potential cannot be determined.
		"""
		if self.eidata is None:
			raise ValueError(
				"No diagonalization data. Use diagonalize() before update_density().")
		# In the following, the argument 'cardens' invokes a high-precision
		# calculation of the chemical potential; it is "hidden" in the output 'ef'.
		densitydata = integrated_dos_ll(self.eidata, self.erange, self.params, broadening=self.broadening)
		densitydata.strategy_no_e0 = 'ignore'

		# Calculate offset density
		# Note that only every other Gamma 6 subband has -1 level -> (ll_max + 1) + (ll_max + 2)
		offset = self.params.nz * (self.ll_max + 1 + self.ll_max + 2) * eoverhbar/(2*np.pi) * self.kbs.b.get_values("bz")
		offset_vol = offset / (self.params.nz * self.params.zres)

		# TODO: The following functionality should be implemented in DensityData.offset()
		densitydata.densdata = (densitydata.densdata + offset[:,np.newaxis])

		# Chemical potential at the carrier density, and at zero density (ef0)
		chem_pot = densitydata.energy_at_dos_ll(self.cardens, subdiv=1)[1].flatten()
		ef0 = densitydata.energy_at_dos_ll(0., subdiv=1)[1].flatten()
		self.special_energies = {'ef': chem_pot, 'ef0': ef0}

		if 'verbose' in sys.argv:
			print(f"DEBUG mu:{chem_pot=}")

		if chem_pot is None:
			self.status = 4
			raise SelfConError("Unable to determine Fermi energy/chemical potential.\n")

		# For k-dependence the factors 1 / (2 pi)^2 ensure that one occupied state
		# "corresponds" to a charge density of e / A_BZ.
		# For B-dependence this is ensured by multiplication with degeneracy/area
		# (at the moment in int_dos_by_band())

		pdensz = densityz_ll(
			self.eidata, chem_pot, self.erange, self.params.nz,
			dz=self.params.zres, norb=self.params.norbitals,
			broadening=self.broadening, electrons=True, holes=True, offset_vol=offset_vol,
			assume_sorted_aligned = True
		)
		self.pdensz.append(pdensz)

		if finalization:
			# split into electron/holes positive are holes
			# NOTE(review): pdensz_e takes the positive part while the note
			# above says positive values are holes — confirm sign convention.
			self.pdensz_e = np.maximum(pdensz, 0)
			self.pdensz_h = np.minimum(pdensz, 0)

		# Debug output
		zval = self.params.zvalues_nm()
		self.write_to_tempfile("scdens.csv", zval, -pdensz[-1])  # qdensz = -pdensz
		if 'verbose' in sys.argv:
			print("Density: rho =", np.sum(pdensz) * self.params.zres)
			print(pdensz[:8])
			print("...")
			print(pdensz[-8:])
			print("Density antisymmetry:", np.amax(np.abs(pdensz - pdensz[::-1])))
diff --git a/kdotpy-v1.0.0/src/kdotpy/spinmat.py b/kdotpy-v1.0.0/src/kdotpy/spinmat.py
new file mode 100644
index 0000000000000000000000000000000000000000..4312b2b0e491d50420583b682643c68f2721a81d
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/spinmat.py
@@ -0,0 +1,167 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
### ANGULAR MOMENTUM MATRICES ###

# Precomputed square roots used in the matrix elements below
_s2 = np.sqrt(2)
_s3 = np.sqrt(3)
_s6 = np.sqrt(6)
_s8 = 2 * np.sqrt(2)

## Definition of total angular momentum matrices (note multiplicative factors)
## The 8x8 matrices are block diagonal in blocks of dimension 2, 4, 2, as can
## be read off from the diagonal of jzmat: Jz eigenvalues +-1/2,
## (3/2, 1/2, -1/2, -3/2), and +-1/2.
jxmat = np.array([
	[   0,   1,   0,   0,   0,   0,   0,   0],
	[   1,   0,   0,   0,   0,   0,   0,   0],
	[   0,   0,   0, _s3,   0,   0,   0,   0],
	[   0,   0, _s3,   0,   2,   0,   0,   0],
	[   0,   0,   0,   2,   0, _s3,   0,   0],
	[   0,   0,   0,   0, _s3,   0,   0,   0],
	[   0,   0,   0,   0,   0,   0,   0,   1],
	[   0,   0,   0,   0,   0,   0,   1,   0]], dtype = complex) * 0.5
jymat = np.array([
	[   0,  -1,   0,   0,   0,   0,   0,   0],
	[   1,   0,   0,   0,   0,   0,   0,   0],
	[   0,   0,   0,-_s3,   0,   0,   0,   0],
	[   0,   0, _s3,   0,  -2,   0,   0,   0],
	[   0,   0,   0,   2,   0,-_s3,   0,   0],
	[   0,   0,   0,   0, _s3,   0,   0,   0],
	[   0,   0,   0,   0,   0,   0,   0,  -1],
	[   0,   0,   0,   0,   0,   0,   1,   0]], dtype = complex) * 0.5j
jzmat = np.array([
	[ 0.5,   0,   0,   0,   0,   0,   0,   0],
	[   0,-0.5,   0,   0,   0,   0,   0,   0],
	[   0,   0, 1.5,   0,   0,   0,   0,   0],
	[   0,   0,   0, 0.5,   0,   0,   0,   0],
	[   0,   0,   0,   0,-0.5,   0,   0,   0],
	[   0,   0,   0,   0,   0,-1.5,   0,   0],
	[   0,   0,   0,   0,   0,   0, 0.5,   0],
	[   0,   0,   0,   0,   0,   0,   0,-0.5]], dtype = complex)
+
## Definition of (proper) spin matrices (note multiplicative factors)
## Unlike the J matrices above, these couple the 4-dimensional middle block to
## the last 2-dimensional block (off-diagonal _s2, _s6, _s8 entries).
sxmat = np.array([
	[   0,   3,   0,   0,   0,   0,   0,   0],
	[   3,   0,   0,   0,   0,   0,   0,   0],
	[   0,   0,   0, _s3,   0,   0, _s6,   0],
	[   0,   0, _s3,   0,   2,   0,   0, _s2],
	[   0,   0,   0,   2,   0, _s3,-_s2,   0],
	[   0,   0,   0,   0, _s3,   0,   0,-_s6],
	[   0,   0, _s6,   0,-_s2,   0,   0,  -1],
	[   0,   0,   0, _s2,   0,-_s6,  -1,   0]], dtype = complex) / 6
symat = np.array([
	[   0,  -3,   0,   0,   0,   0,   0,   0],
	[   3,   0,   0,   0,   0,   0,   0,   0],
	[   0,   0,   0,-_s3,   0,   0,-_s6,   0],
	[   0,   0, _s3,   0,  -2,   0,   0,-_s2],
	[   0,   0,   0,   2,   0,-_s3,-_s2,   0],
	[   0,   0,   0,   0, _s3,   0,   0,-_s6],
	[   0,   0, _s6,   0, _s2,   0,   0,   1],
	[   0,   0,   0, _s2,   0, _s6,  -1,   0]], dtype = complex) * 1.j / 6
szmat = np.array([
	[   3,   0,   0,   0,   0,   0,   0,   0],
	[   0,  -3,   0,   0,   0,   0,   0,   0],
	[   0,   0,   3,   0,   0,   0,   0,   0],
	[   0,   0,   0,   1,   0,   0,-_s8,   0],
	[   0,   0,   0,   0,  -1,   0,   0,-_s8],
	[   0,   0,   0,   0,   0,  -3,   0,   0],
	[   0,   0,   0,-_s8,   0,   0,  -1,   0],
	[   0,   0,   0,   0,-_s8,   0,   0,   1]], dtype = complex) / 6

# Pauli matrices (2x2)
sigmax = np.array([[0, 1], [1, 0]], dtype = complex)
sigmay = np.array([[0, -1], [1, 0]], dtype = complex) * 1.j
sigmaz = np.array([[1, 0], [0, -1]], dtype = complex)
+
# T matrices (2x4, rectangular): couple the 2-dimensional and 4-dimensional
# blocks of the 8-orbital basis (note multiplicative factors)
tx = np.array([
	[-_s3,   0,   1,   0],
	[   0,  -1,   0, _s3]], dtype = complex) / (3 * _s2)
ty = np.array([
	[ _s3,   0,   1,   0],
	[   0,   1,   0, _s3]], dtype = complex) * -1.j / (3 * _s2)
tz = np.array([
	[   0,   1,   0,   0],
	[   0,   0,   1,   0]], dtype = complex) * _s2 / 3
txx = np.array([
	[   0,  -1,   0, _s3],
	[-_s3,   0,   1,   0]], dtype = complex) / (3 * _s2)
tyy = np.array([
	[   0,  -1,   0,-_s3],
	[ _s3,   0,   1,   0]], dtype = complex) / (3 * _s2)
tzz = np.array([
	[   0,   1,   0,   0],
	[   0,   0,  -1,   0]], dtype = complex) * _s2 / 3
tyz = np.array([
	[  -1,   0,-_s3,   0],
	[   0, _s3,   0,   1]], dtype = complex) * 1.j / (2 * _s6)
tzx = np.array([
	[  -1,   0, _s3,   0],
	[   0, _s3,   0,  -1]], dtype = complex) * 1 / (2 * _s6)
txy = np.array([
	[   0,   0,   0,  -1],
	[  -1,   0,   0,   0]], dtype = complex) * 1.j / _s6


## Basis for sigma_i, J_i, T_i, and U_i = T_i^dagger in 3-dimensional representation.
# j3basis restricts the J matrices to the middle 4x4 (J=3/2) block.
sigma3basis = [sigmax, sigmay, sigmaz]
j3basis = [jxmat[2:6, 2:6], jymat[2:6, 2:6], jzmat[2:6, 2:6]]
t3basis = [tx, ty, tz]
u3basis = [m.conjugate().transpose() for m in t3basis]

## Basis for Tij and Uij = Tij^dagger in 5-dimensional representation.
## Note: (2 Tzz - Txx - Tyy) / sqrt(3) = sqrt(3) Tzz
t5basis = [2 * tyz, 2 * tzx, 2 * txy, txx - tyy, np.sqrt(3.) * tzz]
u5basis = [m.conjugate().transpose() for m in t5basis]
+
def restrictmat(mat, indices):
	"""Zero out all entries of a matrix outside a given set of row/column indices.

	Builds a diagonal 0/1 projector P with ones at the given indices and
	returns P @ mat @ P, i.e. only rows AND columns in 'indices' survive.

	Arguments:
	mat      A matrix (2-dim numpy array)
	indices  List or array of indices which should not be set to zero.

	Returns:
	A matrix (2-dim numpy array) of the same size as the input matrix
	"""
	projector = np.zeros(mat.shape, dtype = int)
	idx = np.asarray(indices)
	projector[idx, idx] = 1
	return projector @ (mat @ projector)
+
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/strain.py b/kdotpy-v1.0.0/src/kdotpy/strain.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e7d41ddf3f01b3d438261161d93599dc4263f66
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/strain.py
@@ -0,0 +1,176 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+
### STRAIN CALCULATION ###

# Sanity bounds on the relative strain; values outside cause a fatal error
# in strain_epsilondiag() (except for vacuum, material name 'Va').
max_strain = 0.3
min_strain = -0.3
strain_warning_given = False  # module-level flag so the warning is shown only once
c21_by_c11 = 0.69  # C21/C11 elastic constants (almost equal for HgTe and CdTe)
+
def epsilonx(layer_material, substrate_material, a_lattice = None):
	"""Strain parameter epsilon
	If a_lattice is set, use this value as the lattice constant. If a substrate
	material is set, use its lattice constant. If None, return 0, denoting the
	absence of strain.

	Arguments:
	layer_material      Material instance
	substrate_material  Material instance or None.
	a_lattice           Number or None. The lattice constant of the strained
	                    material.

	Returns:
	strain_epsilon    Relative strain value.
	"""
	if a_lattice is None:
		if substrate_material is None:
			# Neither lattice constant nor substrate: unstrained layer
			return 0
		a_lattice = substrate_material['a']
	a_layer = layer_material['a']
	return (a_lattice - a_layer) / a_layer
+
def lattice_const_from_strain(epsilon, reference_material):
	"""Calculate lattice constant from (relative) strain

	Arguments:
	epsilon             None, 'none', number, or a 3-tuple/list of strain
	                    components.
	reference_material  Material instance providing the unstrained lattice
	                    constant 'a'.

	Returns:
	The lattice constant a_ref * (1 + epsilon_xx).
	"""
	a_ref = reference_material['a']
	if epsilon is None or epsilon == 'none':
		return a_ref
	if isinstance(epsilon, (float, np.floating, int, np.integer)):
		return a_ref * (1.0 + epsilon)
	if isinstance(epsilon, (tuple, list)) and len(epsilon) == 3:
		# Let strain_automatic() fill in missing components; use epsilon_xx
		epsxx = strain_automatic(epsilon)[0]
		return a_ref * (1.0 + epsxx)
	raise TypeError("Argument epsilon must be a float or a tuple/list of length 3")
+
def strain_epsilondiag(layer_material, substrate_material, strain = None, a_lattice = None, hide_strain_warning = False):
	"""Diagonal of the epsilon (strain) matrix
	If the relative lattice deformation argument epsilon is defined, use this
	value. Otherwise, calculate it from the substrate material.

	Arguments:
	layer_material       Material instance
	substrate_material   Material instance or None.
	strain               None, 'none', float, or 3-tuple. If None, use the other
	                     parameters (a_lattice or substrate_material). If
	                     'none', treat as 0. If float, the strain value in x
	                     direction. If a 3-tuple, the strain values in x, y, z
	                     directions.
	a_lattice            Number or None. The lattice constant of the strained
	                     material.
	hide_strain_warning  True or False. If True, hide the warning issued when
	                     lattice constant and substrate material are both given
	                     explicitly.

	Returns:
	List or tuple of length 3. The diagonal components of the strain tensor
	epsilon.
	"""
	global strain_warning_given

	if a_lattice is not None and substrate_material is not None and (not hide_strain_warning) and (not strain_warning_given):
		sys.stderr.write("Warning (strain_epsilondiag): The lattice constant is given explicitly, so the substrate parameters are ignored.\n")
		strain_warning_given = True  # warn only once per run

	# Determine the scalar or tuple strain value epsilon
	if strain is None:
		epsilon = epsilonx(layer_material, substrate_material, a_lattice)
	elif strain == 'none':
		epsilon = 0.0
	elif (isinstance(strain, (tuple, list)) and len(strain) == 3) or isinstance(strain, (float, np.floating, int, np.integer)):
		epsilon = strain
	else:
		raise TypeError("Argument strain must be a float or a tuple/list of length 3")

	# Fill in missing tensor components and check the sanity bounds;
	# vacuum (material name 'Va') is exempt from the bounds check.
	epsilon_xx_yy_zz = strain_automatic(epsilon)
	if (max(epsilon_xx_yy_zz) > max_strain or min(epsilon_xx_yy_zz) < min_strain) and layer_material.name != 'Va':
		sys.stderr.write("ERROR (strain_epsilondiag): Relative strain value exceeds bounds [%i%%, +%i%%].\n" % (100 * min_strain, 100 * max_strain))
		# Use sys.exit, not the site-provided exit(): the latter is not
		# guaranteed to exist (e.g. python -S, embedded interpreters).
		sys.exit(1)
	return epsilon_xx_yy_zz
+
def strain_automatic(epsilon):
	"""Substitute values for None for given strain

	The substitution rules are applied as follows:
	Rule 0:  If all values are None, assume zero strain.
	Rule 1:  Set in-plane components to be equal, if only one is specified.
	Rule 2:  Determine in-plane from out-of-plane component or vice-versa; for
	         this, minimize the energy functional as given by Ref. [1], Eq. (3),
	         using the unknown strain components as variables.

	Note:
	These rules are valid for crystal orientation (001).

	Arguments:
	epsilon   None, 'none', number, or 3-tuple/list whose entries may be None.

	Returns:
	None if epsilon is None or 'none'; otherwise a list [epsxx, epsyy, epszz].

	Reference:
	[1] De Caro and Tapfer, Phys. Rev. B 51, 4374 (1995)
	"""
	if epsilon is None or epsilon == 'none':
		return None
	elif isinstance(epsilon, (tuple, list)) and len(epsilon) == 3:
		epsxx, epsyy, epszz = epsilon
		# Rule 0: If all values are None, assume no strain
		if epsxx is None and epsyy is None and epszz is None:
			return [0.0, 0.0, 0.0]
		# Rule 1: Set strain to be isotropic in-plane, if only one of epsxx or
		# epsyy is given
		if epsxx is None and epsyy is not None:
			epsxx = epsyy
		if epsyy is None and epsxx is not None:
			epsyy = epsxx
		# Rule 2a: Determine in-plane strain from epszz
		if epsxx is None and epszz is not None:
			epsxx = epszz * -c21_by_c11 / (1 + c21_by_c11)
			epsyy = epsxx
		# Rule 2b: Determine epszz from in-plane strain
		if epszz is None and epsxx is not None and epsyy is not None:
			epszz = (epsxx + epsyy) * -c21_by_c11

		# TODO: Other functions need to be adapted to accommodate anisotropic
		# in-plane strain
		if epsxx is not None and epsyy is not None and epsxx != epsyy:
			sys.stderr.write("ERROR (strain_automatic): Anisotropic in-plane strain is not (yet) supported.\n")
			# Use sys.exit, not the site-provided exit(): the latter is not
			# guaranteed to exist (e.g. python -S, embedded interpreters).
			sys.exit(1)
		return [epsxx, epsyy, epszz]
	elif isinstance(epsilon, (float, np.floating, int, np.integer)):
		# Scalar input: isotropic in-plane strain, epszz from Rule 2b
		return [epsilon, epsilon, -2 * epsilon * c21_by_c11]
	else:
		raise TypeError("Argument epsilon must be a float or a tuple/list of length 3")
diff --git a/kdotpy-v1.0.0/src/kdotpy/symbolic.py b/kdotpy-v1.0.0/src/kdotpy/symbolic.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c168ed73a3bd1b91e45bcc9c21dca7b90abb70
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/symbolic.py
@@ -0,0 +1,1506 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import warnings
+import numpy as np
+from itertools import combinations
+from scipy.sparse import csc_matrix, dok_matrix, issparse, SparseEfficiencyWarning
+
+from .physconst import eoverhbar
+from .momentum import Vector
+from .lltools import delta_n_ll
+
def polar(z, fmt = None, degfmt = None):
	"""Format function for displaying complex numbers in polar form.

	z       Complex number.
	fmt     String. Format for a numeric (float) value. Default '%s'.
	degfmt  String. Format for angular value in degrees. Default '%s'.

	Returns:
	String.
	"""
	fmt = '%s' if fmt is None else fmt
	degfmt = '%s' if degfmt is None else degfmt
	magnitude = abs(z)
	phase = np.angle(z)
	# Near-zero magnitude renders as plain zero
	if magnitude < 1e-10:
		return "0"
	# Phases near 0, +-pi, +-pi/2 render as (-)x or (-)xj
	if abs(phase) < 1e-3:
		return fmt % magnitude
	if abs(phase - np.pi) < 1e-3 or abs(phase + np.pi) < 1e-3:
		return "-" + fmt % magnitude
	if abs(phase - 0.5 * np.pi) < 1e-3:
		return (fmt % magnitude) + "j"
	if abs(phase + 0.5 * np.pi) < 1e-3:
		return "-" + (fmt % magnitude) + "j"
	# Generic case: magnitude times complex exponential, angle in degrees
	return (fmt % magnitude) + " exp(1.j * " + (degfmt % (phase * 180 / np.pi)) + " deg)"
+
def reciprocal_energies(em1, em2, el):
	"""Calculate 1 / (e_1 - e) + 1 / (e_2 - e), iterating over e."""
	energies = np.asarray(el)
	return 1. / (em1 - energies) + 1. / (em2 - energies)
+
def ismatrix(x):
	"""Test if object is a 2d numpy array or a scipy sparse object"""
	is_arraylike = isinstance(x, np.ndarray) or issparse(x)
	return is_arraylike and x.ndim == 2
+
def ismatrixlist(x):
	"""Test if a list represents a matrix.
	A matrix is a nonempty list of nonempty sublists of equal length."""
	if not isinstance(x, list) or len(x) == 0:
		return False
	if not isinstance(x[0], list) or len(x[0]) == 0:
		return False
	ncol = len(x[0])
	# Every row must be a list of the same length as the first
	return all(isinstance(row, list) and len(row) == ncol for row in x)
+
def spmatrix_broadcast(inputmat, opmat):
	"""Multiply the value (matrix M_op) with an operator matrix.
	'Broadcast' the operator matrix over the full matrix M_op, which has size
	(N norbitals) x (N norbitals).

	Arguments:
	inputmat   Sparse matrix of size (N norb) x (N norb).
	opmat      Matrix of size norb x norb; entry (i, j) scales the strided
	           submatrix inputmat[i::norb, j::norb].

	Returns:
	Matrix of the same shape and type as inputmat.
	"""
	norb = opmat.shape[0]
	opmat = dok_matrix(opmat)

	# Preserve shape, dtype and (sparse) type of the input
	outputmat = 0. * inputmat

	if opmat.nnz == 0:
		return outputmat

	# Strided slice assignment on a csc_matrix raises a
	# SparseEfficiencyWarning (scipy recommends lil_matrix), but csc_matrix
	# happens to be faster here. Suppress only that specific warning category.
	# (The previous code passed the warning class as the 'record' argument of
	# catch_warnings, which expects a bool; the truthy value switched on
	# record mode and silently swallowed ALL warnings, not just this one.)
	with warnings.catch_warnings():
		warnings.simplefilter("ignore", SparseEfficiencyWarning)
		for idx, val in opmat.items():
			outputmat[idx[0]::norb, idx[1]::norb] = inputmat[idx[0]::norb, idx[1]::norb] * val
	return outputmat
+
# Module-level flag set by op_eval() so its warning is emitted only once
_op_degree_warning = False
def op_eval(op, k, eB):
	"""Evaluate an abstract operator product.
	Take into account nonzero commutator between k+ and k-.

	Arguments:
	op   String of + and -, where + stands for k+ and - for k-.
	k    Vector instance or 2-tuple. The vector value (kx, ky). For Vector
	     instances, nonzero kz values are ignored.
	eB   Vector or float. Magnetic field in z direction, times e / hbar.

	Returns:
	A number (float or complex). Products of degree > 4 are not implemented;
	they evaluate to 0.0 and trigger a one-time warning on stderr.
	"""
	global _op_degree_warning
	# Normalize k to a plain (kx, ky) tuple.
	if isinstance(k, Vector):
		k = k.xy()
	elif isinstance(k, tuple) and len(k) == 2:
		pass
	else:
		raise TypeError("k must be a 2-tuple or a Vector instance")
	# Normalize eB to a number (out-of-plane component).
	if isinstance(eB, Vector):
		eB = eB.z()
	elif isinstance(eB, (float, np.floating, int, np.integer)):
		pass
	else:
		raise TypeError("eB must be a number or a Vector instance")
	# kp = kx + i ky, km = kx - i ky
	kp, km = k[0] + 1.j * k[1], k[0] - 1.j * k[1]

	# The eB corrections below encode the noncommutativity of k+ and k-:
	# note that, e.g., the "+-" and "-+" results differ by 2 eB.
	if op == "":
		return 1.0
	# first order
	elif op == "+":
		return kp
	elif op == "-":
		return km
	# second order
	elif op == "++":
		return kp**2 - eB
	elif op == "--":
		return km**2 + eB
	elif op == "+-":
		return (k[0]**2 + k[1]**2) - eB
	elif op == "-+":
		return (k[0]**2 + k[1]**2) + eB
	# third order
	elif op == "+++":
		return kp**3 - 3 * eB * kp
	elif op == "---":
		return km**3 + 3 * eB * km
	elif op == "++-":
		return kp**2 * km + eB * (-3. * k[0] - 1.j * k[1])
	elif op == "+-+":
		return kp**2 * km + eB * (     -k[0] + 1.j * k[1])
	elif op == "-++":
		return kp**2 * km + eB * (      k[0] + 3.j * k[1])
	elif op == "--+":
		return kp * km**2 + eB * ( 3. * k[0] - 1.j * k[1])
	elif op == "-+-":
		return kp * km**2 + eB * (      k[0] + 1.j * k[1])
	elif op == "+--":
		return kp * km**2 + eB * (     -k[0] + 3.j * k[1])
	# fourth order
	elif op == "++++":
		return kp**4 + 3 * eB * ( eB - 2. * kp**2)
	elif op == "----":
		return km**4 + 3 * eB * ( eB + 2. * km**2)
	elif op == "+-+-":
		return (k[0]**2 + k[1]**2)**2 - 2. * eB * km**2 - eB**2
	elif op == "-+-+":
		return (k[0]**2 + k[1]**2)**2 + 2. * eB * kp**2 - eB**2
	elif op == "++--":
		return (k[0]**2 + k[1]**2)**2 - 4. * eB * (k[0]**2 + k[1]**2 - 1.j * k[0] * k[1]) + eB**2
	elif op == "--++":
		return (k[0]**2 + k[1]**2)**2 + 4. * eB * (k[0]**2 + k[1]**2 + 1.j * k[0] * k[1]) + eB**2
	elif op == "+--+" or op == "-++-":
		return (k[0]**2 + k[1]**2)**2 - 4.j * eB * k[0] * k[1] - 3. * eB**2
	elif op == "+++-":
		return kp**3 * km + 2. * eB * (-3. * k[0]**2 - 3.j * k[0] * k[1]               ) + 3. * eB**2
	elif op == "++-+":
		return kp**3 * km + 2. * eB * (-2. * k[0]**2 - 1.j * k[0] * k[1] -      k[1]**2) +      eB**2
	elif op == "+-++":
		return kp**3 * km + 2. * eB * (     -k[0]**2 + 1.j * k[0] * k[1] - 2. * k[1]**2) -      eB**2
	elif op == "-+++":
		return kp**3 * km + 2. * eB * (                3.j * k[0] * k[1] - 3. * k[1]**2) - 3. * eB**2
	elif op == "---+":
		return kp * km**3 + 2. * eB * ( 3. * k[0]**2 - 3.j * k[0] * k[1]               ) + 3. * eB**2
	elif op == "--+-":
		return kp * km**3 + 2. * eB * ( 2. * k[0]**2 - 1.j * k[0] * k[1] +      k[1]**2) +      eB**2
	elif op == "-+--":
		return kp * km**3 + 2. * eB * (      k[0]**2 + 1.j * k[0] * k[1] + 2. * k[1]**2) -      eB**2
	elif op == "+---":
		return kp * km**3 + 2. * eB * (                3.j * k[0] * k[1] + 3. * k[1]**2) - 3. * eB**2
	else:
		# Degree > 4 is not implemented; warn once and treat the term as zero.
		if not _op_degree_warning:
			_op_degree_warning = True
			sys.stderr.write("Warning (op_eval): Operators of degree > 4 in kp and km were neglected.\n")
		return 0.0
+
def op_eval_ll(op, n, eB):
	"""Evaluate an abstract operator product at Landau level n.
	Only consider terms up to quadratic order.

	Arguments:
	op   String of + and -, where + stands for k+ and - for k-.
	n    Integer. The LL index. The result is 0 for n < 0.
	eB   Vector or float. Magnetic field in z direction, times e / hbar.
	     NOTE(review): unlike op_eval(), no Vector-to-float conversion is
	     performed here; the comparisons below assume eB is already a number.
	     Confirm with callers.

	Returns:
	Float. Zero for n < 0 and for operators of degree > 2; the latter case
	issues a one-time warning on stderr.
	"""
	global _op_degree_warning
	if n < 0:
		return 0.0

	if op == "":
		return 1.0
	# first order; the sign of eB selects whether k+ raises or lowers the LL index
	elif op == "+":
		return np.sqrt(2 * abs(eB) * (n + 1)) if eB >= 0 else -np.sqrt(2 * abs(eB) * n)
	elif op == "-":
		return np.sqrt(2 * abs(eB) * n) if eB >= 0 else -np.sqrt(2 * abs(eB) * (n + 1))
	# second order
	elif op == "++":  # kp**2 - eB
		return 2 * abs(eB) * np.sqrt((n + 1) * (n + 2)) if eB >= 0 else 0.0 if n <= 1 else 2 * abs(eB) * np.sqrt(n * (n - 1))
	elif op == "--":  # km**2 + eB
		return 2 * abs(eB) * np.sqrt((n + 1) * (n + 2)) if eB < 0 else 0.0 if n <= 1 else 2 * abs(eB) * np.sqrt(n * (n - 1))
	elif op == "+-":  # (k[0]**2 + k[1]**2) - eB
		return 2 * abs(eB) * n if eB >= 0 else 2 * abs(eB) * (n + 1)
	elif op == "-+":  # (k[0]**2 + k[1]**2) + eB
		return 2 * abs(eB) * (n + 1) if eB >= 0 else 2 * abs(eB) * n
	else:
		if not _op_degree_warning:
			_op_degree_warning = True
			# Fix: the message previously attributed this warning to op_eval.
			sys.stderr.write("Warning (op_eval_ll): Operators of degree > 2 in kp and km were neglected.\n")
		return 0.0
+
+def _apply_kp(c, alpha, beta, gamma):
+	"""Apply k+ to the term  c kx^alpha ky^beta (eB)^gamma.
+
+	Arguments:
+	c, alpha, beta, gamma   Numerical values that encodes the term described
+	                        above.
+
+	Returns:
+	List of 4-tuples (ci, alphai, betai, gammai). This encodes a sum of terms as
+	described above.
+	"""
+	if alpha > 0:
+		return [(c, alpha+1, beta, gamma), (1.j * c, alpha, beta+1, gamma), (-alpha * c, alpha - 1, beta, gamma + 1)]
+	else:
+		return [(c, alpha+1, beta, gamma), (1.j * c, alpha, beta+1, gamma)]
+
+def _apply_km(c, alpha, beta, gamma):
+	"""Apply k- to the term  c kx^alpha ky^beta (eB)^gamma.
+
+	Arguments:
+	c, alpha, beta, gamma   Numerical values that encodes the term described
+	                        above.
+
+	Returns:
+	List of 4-tuples (ci, alphai, betai, gammai). This encodes a sum of terms as
+	described above.
+	"""
+	if alpha > 0:
+		return [(c, alpha+1, beta, gamma), (-1.j * c, alpha, beta+1, gamma), (alpha * c, alpha - 1, beta, gamma + 1)]
+	else:
+		return [(c, alpha+1, beta, gamma), (-1.j * c, alpha, beta+1, gamma)]
+
+def _count_pm(op):
+	"""Count number of + and number of - in operator."""
+	pm = [0, 0]
+	for o in op:
+		if o == '+':
+			pm[0] += 1
+		elif o == '-':
+			pm[1] += 1
+		else:
+			raise ValueError("ERROR (_count_pm): Illegal operator. Only + and - are allowed.")
+	return tuple(pm)
+
def opsum_kx_ky_reduce(opsum):
	"""Reduce operator sum of kx and ky

	Returns:
	kx_ky_eB_sum   A dict instance of the form {(alpha, beta, gamma): c, ...}.
	               This dict encodes the sum of terms
	               c kx^alpha ky^beta (eB)^gamma.
	"""
	result = {}
	for op, coeff in opsum.items():
		# Start from the bare coefficient and apply the operators in op
		# right-to-left, expanding each k+/k- into kx, ky, and eB terms.
		terms = [(coeff, 0, 0, 0)]
		for o in reversed(op):
			if o == '+':
				apply_fn = _apply_kp
			elif o == '-':
				apply_fn = _apply_km
			else:
				raise ValueError("ERROR (opsum_kx_ky_reduce): Illegal operator. Only + and - are allowed.")
			# Expand every term and flatten the resulting list of lists.
			terms = [t for term in terms for t in apply_fn(*term)]
		# Collect like powers of kx, ky, and eB.
		for c, alpha, beta, gamma in terms:
			key = (alpha, beta, gamma)
			if key in result:
				result[key] += c
			else:
				result[key] = c
	return result
+
def kp_km_to_kx_ky(opsum):
	"""Convert operator sum in terms of k+ and k- into kx and ky.

	Argument:
	opsum   Operator sum, i.e., dict whose keys are strings of + and - and
	        whose values are the coefficients.

	Returns:
	Dict whose keys are strings of x and y (encoding products of kx and ky)
	and whose values are the coefficients. Each k+ expands to kx + i ky and
	each k- to kx - i ky; no commutator terms arise because kx and ky commute.

	Raises:
	ValueError if an operator string contains a character other than + or -.
	"""
	kx_ky_sum = {}
	for op, coeff in opsum.items():
		xx = {"": coeff}
		for o in op:
			if o == '+':
				iy = 1j  # k+ = kx + i ky
			elif o == '-':
				iy = -1j  # k- = kx - i ky
			else:
				# Fix: the error message previously named opsum_kx_ky_reduce.
				raise ValueError("ERROR (kp_km_to_kx_ky): Illegal operator. Only + and - are allowed.")
			xx_x = {x + "x": 1 * xx[x] for x in xx}
			xx_y = {x + "y": iy * xx[x] for x in xx}
			xx = xx_x
			xx.update(xx_y)

		for x in xx:
			if x in kx_ky_sum:
				kx_ky_sum[x] += 1 * xx[x]
			else:
				kx_ky_sum[x]  = 1 * xx[x]
	return kx_ky_sum
+
+
def op_kx_ky_eB(opsum, kx = "kx", ky = "ky", eB = "eB", fmt = None, degfmt = None):
	"""String formatter for operator sum in terms of kx, ky, and eB

	Arguments:
	opsum   Operator sum, i.e., dict whose elements are operator strings and
	        whose values are numbers, arrays, matrices, etc.
	kx      String for kx.
	ky      String for ky.
	eB      String for eB (magnetic field times e / hbar).
	fmt     String. Format for a numeric (float) value. Default '%s'.
	degfmt  String. Format for angular value in degrees. Default '%s'.

	Returns:
	String.
	"""
	def power_str(sym, exponent):
		# Render sym^exponent followed by a space; empty string for exponent 0.
		if exponent == 0:
			return ""
		if exponent == 1:
			return sym + " "
		return "%s^%i " % (sym, exponent)

	pieces = []
	for (alpha, beta, gamma), val in opsum_kx_ky_reduce(opsum).items():
		coeff_str = "+ " + polar(val, fmt = fmt, degfmt = degfmt) + " "
		pieces.append(coeff_str + power_str(kx, alpha) + power_str(ky, beta) + power_str(eB, gamma))
	return "".join(pieces)
+
def opsum_times_opsum(opsum1, opsum2, const = 1):
	"""Calculate the product of two operator sums.

	Arguments:
	opsum1  Operator sum, i.e., dict whose elements are operator strings and
	        whose values are numbers, arrays, matrices, etc.
	opsum2  Operator sum.
	const   Extra scalar prefactor applied to every term.

	Note:
	This product is non-commutative: the operators of opsum1 act first
	(their strings are concatenated on the left).

	Returns:
	Operator sum. The product opsum1 times opsum2.
	"""
	product = {}
	for op1, val1 in opsum1.items():
		for op2, val2 in opsum2.items():
			key = op1 + op2  # concatenate operator strings
			term = const * (val1 * val2)
			if key in product:
				product[key] += term
			else:
				product[key] = term
	return product
+
def op_kshift(op, delta_kp, delta_km):
	"""Apply shift (k+, k-) --> (k+ + Δa, k- + Δb) to operator"""
	n = len(op)
	shifted = {}
	# Expand the product over all subsets of positions: each chosen position
	# contributes its shift constant, the rest keep their operator.
	for k in range(n + 1):
		for subset in combinations(range(n), k):
			chosen = set(subset)
			remaining = "".join(o for j, o in enumerate(op) if j not in chosen)
			factors = [delta_kp if o == '+' else delta_km if o == '-' else 0 for j, o in enumerate(op) if j in chosen]
			coeff = np.prod(factors)  # empty product yields 1.0
			if remaining in shifted:
				shifted[remaining] += coeff
			else:
				shifted[remaining] = coeff
	return shifted
+
def opsum_evaluate(opsum, k, magn):
	"""Evaluate operator sum at momentum and magnetic field.

	Arguments:
	opsum   Operator sum, i.e., dict whose elements are operator strings and
	        whose values are numbers, arrays, matrices, etc.
	k       Vector instance or 2-tuple. Momentum value. For a Vector instance,
	        the kz component is ignored.
	magn    Vector instance or float. Magnetic field in tesla, not yet
	        multiplied by e / hbar. Only the out-of-plane component is
	        considered.

	Returns:
	Number (float or complex), array, matrix, etc. The evaluated operator sum.
	"""
	eB = eoverhbar * magn
	total = sum(op_eval(op, k, eB) * val for op, val in opsum.items())
	# Mixed sums of sparse matrices and dense arrays evaluate to a
	# numpy.matrix; cast such dense results explicitly to numpy.array.
	if isinstance(total, np.matrix) and not issparse(total):
		return np.array(total)
	return total
+
def str_to_op(opstr):
	"""Get operator string from generic string.

	Argument:
	opstr   String. Whitespace-separated tokens, each one of 'kp', 'k+', '+'
	        (for k+) or 'km', 'k-', '-' (for k-); case-insensitive,
	        underscores are ignored.

	Returns:
	String of + and - characters.

	Raises:
	ValueError for an empty/whitespace-only string or any unrecognized token.
	"""
	# split() handles runs of whitespace gracefully; the previous
	# split(" ") produced empty tokens (and thus an error) for repeated
	# spaces. The redundant .strip().lstrip() is also no longer needed.
	tokens = opstr.split()
	if not tokens:
		raise ValueError("Operator string must consist of kp (k+, +) or km (k-, -) separated by spaces")
	op = ""
	for token in tokens:
		s = token.lower().replace('_', '')
		if s in ['kp', 'k+', '+']:
			op += '+'
		elif s in ['km', 'k-', '-']:
			op += '-'
		else:
			raise ValueError("Operator string must consist of kp (k+, +) or km (k-, -) separated by spaces")
	return op
+
class SymbolicObject:
	"""Container class for a symbolic object.
	A SymbolicObject encodes a sum of terms containing the noncommuting
	operators k+ and k-.

	Attributes:
	opsum   Dict instance, whose keys are strings containing + and -, which
	        encode the operators k+ and k-. The values are the coefficients
	        (numbers for this base class; matrices for SymbolicMatrix).
	"""
	# Scalar types accepted as coefficients and scalar operands.
	# Note: np.complex_ (used here previously) was an alias of the builtin
	# complex and was removed in NumPy 2.0; builtin complex covers it.
	_scalartypes = (float, np.floating, int, np.integer, complex)

	def __init__(self, *arg):
		"""Initialize from nothing, a dict, a scalar, a string, or a (string, scalar) pair.

		Accepted signatures:
		SymbolicObject()              Empty operator sum.
		SymbolicObject(dict)          Use the dict as operator sum directly.
		SymbolicObject(number)        Constant (empty-operator) term only.
		SymbolicObject(str)           Operator (parsed by str_to_op), coefficient 1.0.
		SymbolicObject(str, number) or SymbolicObject(number, str)
		                              Operator with the given coefficient.

		Raises:
		TypeError for any other combination of arguments. (Fix: previously
		such input silently left the instance without an opsum attribute,
		and a dead branch tried to build the initial value as a set literal.)
		"""
		if len(arg) == 0:
			self.opsum = {}
		elif len(arg) == 1 and isinstance(arg[0], dict):
			self.opsum = arg[0]
		elif len(arg) == 1 and isinstance(arg[0], self._scalartypes):
			self.opsum = {"": arg[0]}
		elif len(arg) == 1 and isinstance(arg[0], str):
			self.opsum = {str_to_op(arg[0]): 1.0}
		elif len(arg) == 2 and isinstance(arg[0], str) and isinstance(arg[1], self._scalartypes):
			self.opsum = {str_to_op(arg[0]): arg[1]}
		elif len(arg) == 2 and isinstance(arg[1], str) and isinstance(arg[0], self._scalartypes):
			self.opsum = {str_to_op(arg[1]): arg[0]}
		else:
			raise TypeError("Invalid arguments for SymbolicObject")

	def __repr__(self):
		"""Raw representation: the operator-sum dict."""
		return str(self.opsum)

	def __str__(self):
		"""Multi-line human-readable representation."""
		s = 'SymbolicObject(\n'
		for op in self.opsum:
			ops = str(op)
			vals = str(self.opsum[op])
			# Put multi-line (matrix-like) values on their own lines.
			s += "%s:%s%s\n" % (ops, "\n" if "\n" in vals else " ", vals)
		s += ")"
		return s

	def __neg__(self):
		"""Minus self"""
		return SymbolicObject({op: -val for op, val in self.opsum.items()})

	def __add__(self, other):
		"""SymbolicObject + SymbolicObject or SymbolicObject + number"""
		new_opsum = {op: 1 * val for op, val in self.opsum.items()}  # 1 * forces a copy
		if isinstance(other, SymbolicObject):
			for op, val in other.opsum.items():
				if op in new_opsum:
					new_opsum[op] += val
				else:
					new_opsum[op] = 1 * val
		elif isinstance(other, self._scalartypes):
			# A scalar contributes to the constant (empty-operator) term.
			if "" in new_opsum:
				new_opsum[""] += other
			else:
				new_opsum[""] = 1 * other
		else:
			raise ValueError("Arithmetic operation + only for two SymbolicObjects or SymbolicObject and scalar")
		return SymbolicObject(new_opsum)

	def __radd__(self, other):
		"""number + SymbolicObject"""
		if not isinstance(other, self._scalartypes):
			raise ValueError("Arithmetic operation + only for two SymbolicObjects or SymbolicObject and scalar")
		new_opsum = {op: 1 * val for op, val in self.opsum.items()}
		if "" in new_opsum:
			new_opsum[""] += other
		else:
			new_opsum[""] = 1 * other
		return SymbolicObject(new_opsum)

	def __iadd__(self, other):
		"""SymbolicObject += SymbolicObject or SymbolicObject += number (in place)"""
		if isinstance(other, SymbolicObject):
			for op, val in other.opsum.items():
				if op in self.opsum:
					self.opsum[op] += val
				else:
					self.opsum[op] = 1 * val
		elif isinstance(other, self._scalartypes):
			if "" in self.opsum:
				self.opsum[""] += other
			else:
				self.opsum[""] = 1 * other
		else:
			raise ValueError("Arithmetic operation + only for two SymbolicObjects or SymbolicObject and scalar")
		return self

	def __sub__(self, other):
		"""SymbolicObject - SymbolicObject or SymbolicObject - number"""
		return self + (-other)  # combination of __add__ and __neg__

	def __rsub__(self, other):
		"""Number - SymbolicObject"""
		return other + (-self)

	def __mul__(self, other):
		"""SymbolicObject times SymbolicObject, SymbolicMatrix, or number.

		Note:
		The operator product is non-commutative: operator strings are
		concatenated with self's operators first.
		"""
		if isinstance(other, self._scalartypes):
			# Scalars are neither SymbolicObject nor SymbolicMatrix, so
			# checking them first preserves the original dispatch behavior.
			return SymbolicObject({op: other * val for op, val in self.opsum.items()})
		elif isinstance(other, SymbolicMatrix):
			# SymbolicMatrix is a subclass of SymbolicObject and must be
			# dispatched before the generic SymbolicObject branch.
			return other.__rmul__(self)
		elif isinstance(other, SymbolicObject):
			opsump = {}  # 'p' = product
			for op1, val1 in self.opsum.items():
				for op2, val2 in other.opsum.items():
					op = op1 + op2  # concatenate operator strings
					if op in opsump:
						opsump[op] += val1 * val2
					else:
						opsump[op] = val1 * val2
			return SymbolicObject(opsump)
		else:
			raise ValueError("Arithmetic operator * only for two SymbolicObject objects or SymbolicObject and scalar")

	def __rmul__(self, other):
		"""SymbolicObject times SymbolicObject or number times SymbolicObject"""
		if isinstance(other, SymbolicObject):
			opsump = {}  # 'p' = product
			for op1, val1 in other.opsum.items():
				for op2, val2 in self.opsum.items():
					op = op1 + op2  # other's operators act first
					if op in opsump:
						opsump[op] += val1 * val2
					else:
						opsump[op] = val1 * val2
			return SymbolicObject(opsump)
		elif isinstance(other, self._scalartypes):
			return SymbolicObject({op: other * val for op, val in self.opsum.items()})
		else:
			raise ValueError("Arithmetic operator * only for two SymbolicObject objects or SymbolicObject and scalar")

	def __eq__(self, other):
		"""Equality of the underlying operator sums.

		Returns NotImplemented for non-SymbolicObject operands, so that
		Python falls back to the reflected comparison (previously this
		raised AttributeError).
		"""
		if not isinstance(other, SymbolicObject):
			return NotImplemented
		return self.opsum == other.opsum

	def conjugate(self):
		"""Complex conjugate: conjugate coefficients, swap + and -, and reverse each operator string."""
		new_opsum = {}
		for op, val in self.opsum.items():
			new_op = "".join(['+' if c == '-' else '-' for c in op[::-1]])
			new_opsum[new_op] = val.conjugate()
		return SymbolicObject(new_opsum)

	def shift(self, k):
		"""Shift momentum values by amount k.

		Arguments:
		k    Vector instance or 2-tuple. The vector value (kx, ky). For Vector
			 instances, nonzero kz values are ignored.

		Returns:
		A new SymbolicObject instance. (The previous docstring, copied from
		evaluate(), incorrectly promised the coefficient type.)
		"""
		if isinstance(k, Vector):
			kx, ky = k.xy()
		elif isinstance(k, tuple) and len(k) == 2:
			kx, ky = k
		else:
			raise TypeError("Argument k must be a Vector instance or a 2-tuple")
		kp = kx + 1.j * ky
		km = kx - 1.j * ky
		new_opsum = {}
		for op, val in self.opsum.items():
			op_coeff = op_kshift(op, kp, km)
			for new_op, coeff in op_coeff.items():
				if new_op in new_opsum:
					new_opsum[new_op] += coeff * val
				else:
					new_opsum[new_op] = coeff * val
		return SymbolicObject(new_opsum)

	def evaluate(self, k, eB):
		"""Evaluate an abstract operator product.
		Take into account nonzero commutator between k+ and k-.

		Arguments:
		k    Vector instance or 2-tuple. The vector value (kx, ky). For Vector
			 instances, nonzero kz values are ignored.
		eB   Vector or float. Magnetic field in z direction, times e / hbar.

		Returns:
		Sparse matrix if the sum is sparse; otherwise a numpy array (note:
		scalar sums come out as a 0-dimensional array).
		"""
		total = sum([self.opsum[op] * op_eval(op, k, eB) for op in self.opsum])
		# Mixed sums of sparse matrices and dense arrays evaluate to a
		# numpy.matrix. We thus explicitly cast dense results to numpy.array.
		return total if issparse(total) else np.array(total)

	# ll_evaluate: See implementation for SymbolicMatrix

	def maxorder(self, maxord):
		"""Truncate to the given order: discard all terms with more than maxord operators."""
		return SymbolicObject({op: 1 * self.opsum[op] for op in self.opsum if len(op) <= maxord})

	def leadingorder(self, value):
		"""Get the leading-order terms with coefficients larger than value.

		Argument:
		value   Float. The leading order is the minimal order with coefficients
		        that exceed value (in magnitude).

		Returns:
		A new SymbolicObject instance with only the terms of the leading
		order. (Fix: previously the computed order was never applied, so all
		terms exceeding value were returned regardless of order.)
		"""
		order = min((len(op) for op, val in self.opsum.items() if abs(val) > value), default = 100)
		new_opsum = {op: 1 * val for op, val in self.opsum.items() if abs(val) > value and len(op) == order}
		return SymbolicObject(new_opsum)

	def chop(self, value = 1e-10):
		"""Discard all terms with coefficients below the cutoff value.
		For complex coefficients, a negligible real or imaginary part is
		chopped as well.

		Argument:
		value   Float. Cutoff value.

		Returns:
		A new SymbolicObject instance.
		"""
		new_opsum = {}
		for op, val in self.opsum.items():
			if abs(val) < value:
				continue  # drop negligible terms entirely
			if isinstance(val, complex):
				if abs(np.real(val)) < value:
					new_opsum[op] = 1j * np.imag(val)  # purely imaginary
				elif abs(np.imag(val)) < value:
					new_opsum[op] = 1 * np.real(val)  # purely real
				else:
					new_opsum[op] = 1 * val
			else:
				new_opsum[op] = 1 * val
		return SymbolicObject(new_opsum)

	def iszero(self, value = 0.0):
		"""Test whether SymbolicObject is zero up to a (small) value

		Argument:
		value   Float. Threshold value, below which coefficients are regarded
		        as zero.

		Returns:
		True or False.
		"""
		return all(abs(val) <= value for val in self.opsum.values())

	def kx_ky_eB_str(self, kxstr = "kx", kystr = "ky", eBstr = "eB", print_zeros = False, fmt = None, degfmt = None):
		"""Format SymbolicObject as sum of terms involving kx, ky, and eB.

		Arguments:
		kxstr        String for kx.
		kystr        String for ky.
		eBstr        String for eB (magnetic field times e / hbar).
		print_zeros  True or False. If True, also include terms that are
		             (almost) zero. If False, omit these.
		fmt          String. Format for a numeric (float) value. Default '%s'.
		degfmt       String. Format for angular value in degrees. Default '%s'.

		Returns:
		String.
		"""
		if self.opsum == {} or self.iszero(1e-7):
			return "0"
		kx_ky_eB_sum = opsum_kx_ky_reduce(self.opsum)
		s = ""
		for (alpha, beta, gamma), val in kx_ky_eB_sum.items():
			if not print_zeros and abs(val) < 1e-7:
				continue
			vals = "+ " + polar(val, fmt = fmt, degfmt = degfmt) + " "
			kxs = "" if alpha == 0 else kxstr + " " if alpha == 1 else "%s^%i " % (kxstr, alpha)
			kys = "" if beta  == 0 else kystr + " " if beta  == 1 else "%s^%i " % (kystr, beta)
			eBs = "" if gamma == 0 else eBstr + " " if gamma == 1 else "%s^%i " % (eBstr, gamma)
			s += (vals + kxs + kys + eBs)
		return s

	def kp_km_str(self, kpstr = "k+", kmstr = "k-", eBstr = "eB", print_zeros = False, fmt = None, degfmt = None):
		"""Format SymbolicObject as sum of terms involving k+, k-, and eB.

		Arguments:
		kpstr        String for k+.
		kmstr        String for k-.
		eBstr        String for eB (magnetic field times e / hbar).
		print_zeros  True or False. If True, also include terms that are
		             (almost) zero. If False, omit these.
		fmt          String. Format for a numeric (float) value. Default '%s'.
		degfmt       String. Format for angular value in degrees. Default '%s'.

		Returns:
		String.
		"""
		if self.opsum == {} or self.iszero(1e-7):
			return "0"
		s = ""
		for op, val in self.opsum.items():
			if not print_zeros and abs(val) < 1e-7:
				continue
			vals = "+ " + polar(val, fmt = fmt, degfmt = degfmt) + " "
			ops = " ".join([kpstr if o == '+' else kmstr if o == '-' else '?' for o in op])
			s += (vals + ops + " ")
		return s

	def k2_eB_str(self, kstr = "k", kpstr = "k+", kmstr = "k-", eBstr = "eB", print_zeros = False, fmt = None, degfmt = None):
		"""Format SymbolicObject as sum of terms involving (powers of) k, k+, k-, and eB.

		Arguments:
		kstr         String for k.
		kpstr        String for k+.
		kmstr        String for k-.
		eBstr        String for eB (magnetic field times e / hbar).
		print_zeros  True or False. If True, also include terms that are
		             (almost) zero. If False, omit these.
		fmt          String. Format for a numeric (float) value. Default '%s'.
		degfmt       String. Format for angular value in degrees. Default '%s'.

		Returns:
		String.
		"""
		if self.opsum == {} or self.iszero(1e-7):
			return "0"
		def add_term(d, key, val):
			# Accumulate val into d[key].
			if key in d:
				d[key] += val
			else:
				d[key] = val
		new_opsum = {}
		for op, val in self.opsum.items():
			# TODO: Generalize to order > 2
			if op == '+-':
				# k+ k- evaluates to k^2 - eB; keys '2' and 'b' encode these
				add_term(new_opsum, '2', 1 * val)
				add_term(new_opsum, 'b', -1 * val)
			elif op == '-+':
				# k- k+ evaluates to k^2 + eB
				add_term(new_opsum, '2', 1 * val)
				add_term(new_opsum, 'b', 1 * val)
			else:
				add_term(new_opsum, op, 1 * val)

		s = ""
		for op, val in new_opsum.items():
			if not print_zeros and abs(val) < 1e-7:
				continue
			vals = "+ " + polar(val, fmt = fmt, degfmt = degfmt) + " "
			ops = " ".join([kpstr if o == '+' else kmstr if o == '-' else kstr + '^2' if o == '2' else eBstr if o == 'b' else '?' for o in op])
			s += (vals + ops + " ")
		return s

	def deriv(self, to):
		"""Take derivative with respect to k+, k-, kx, or ky.

		Argument:
		to   '+', '-', 'x', or 'y'. Take derivative with respect to this k
		     component.

		Returns:
		A new SymbolicObject instance.
		"""
		new_opsum = {}
		if to in ['+', '-']:
			for op, val in self.opsum.items():
				# Product rule: drop one occurrence of the operator at a time.
				new_op = [op[:j] + op[j+1:] for j, o in enumerate(op) if o == to]
				for o in new_op:
					if o in new_opsum:
						new_opsum[o] += 1 * val
					else:
						new_opsum[o] = 1 * val
			return SymbolicObject(new_opsum)
		if to == 'x':
			# kx = (k+ + k-) / 2, up to the convention used here
			return self.deriv('+') + self.deriv('-')
		if to == 'y':
			return 1.j * (self.deriv('+') - self.deriv('-'))
		raise ValueError("Derivative only allowed with respect to +, -, x, y")

	def delta_n(self, dn):
		"""Take terms whose operators are of the degree dn, where k+ counts as +1 and k- as -1.

		Argument:
		dn   Integer.

		Returns:
		A new SymbolicObject instance.
		"""
		new_opsum = {}
		for op, val in self.opsum.items():
			if op.count("+") - op.count("-") == dn:
				new_opsum[op] = 1 * val
		return SymbolicObject(new_opsum)
+
+class SymbolicMatrix(SymbolicObject):
+	"""Container for symbolic matrix, i.e., a symbolic object whose coefficients are square matrices.
+
+	This class is derived from SymbolicObject. The coefficients (values of the
+	operator sum) are matrix-valued, i.e., represented by 2-dim numpy arrays.
+
+	Attributes:
+	opsum   Dict instance, whose keys are strings containing + and -, which
+	        encode the operators k+ and k-. The values are the coefficients,
+	        matrix-valued in this case.
+	dim     Integer. Size of the square matrices.
+	"""
+	def __init__(self, *arg):
+		if len(arg) == 1 and isinstance(arg[0], dict):
+			self.opsum = arg[0]
+			self.dim = None
+			for op in self.opsum:
+				if self.dim is None:
+					self.dim = self.opsum[op].shape[0]
+				elif self.opsum[op].shape[0] != self.dim:
+					raise ValueError("All elements in the operator sum must have the same shape")
+		elif len(arg) == 1 and ismatrix(arg[0]):
+			self.opsum = {"": 1 * arg[0]}
+			self.dim = arg[0].shape[0]
+		elif len(arg) == 1 and ismatrixlist(arg[0]):
+			shapex = len(arg[0])
+			shapey = len(arg[0][0])
+			if shapex != shapey:
+				raise NotImplementedError("Input lists must form a square array")
+			self.dim = shapex
+			self.opsum = {}
+			for i in range(0, shapex):
+				for j in range(0, shapey):
+					self.setentry(i, j, arg[0][i][j])
+		elif len(arg) == 2 and isinstance(arg[0], (int, np.integer)) and (isinstance(arg[1], type) or isinstance(arg[1], str)):
+			self.opsum = {"": np.zeros((arg[0], arg[0]), dtype = arg[1])}
+			self.dim = arg[0]
+
+	def __add__(self, other):
+		"""SymbolicMatrix + SymbolicMatrix"""
+		if not isinstance(other, SymbolicMatrix):
+			raise ValueError("Arithmetic operation + only for two SymbolicMatrix objects")
+		new_opsum = {}
+		for op in self.opsum:
+			new_opsum[op] = 1 * self.opsum[op]
+		for op in other.opsum:
+			if op in new_opsum:
+				new_opsum[op] += other.opsum[op]
+			else:
+				new_opsum[op] = 1 * other.opsum[op]  # force copy
+		return SymbolicMatrix(new_opsum)
+
+	def __neg__(self):
+		"""SymbolicMatrix - SymbolicMatrix"""
+		new_opsum = {}
+		for op in self.opsum:
+			new_opsum[op] = -self.opsum[op]
+		return SymbolicMatrix(new_opsum)
+
+	def __mul__(self, other):
+		"""SymbolicMatrix times SymbolicObject, matrix, or number
+		This multiplication involves concatenation of operators and either a
+		scalar multiplication (if argument other involves numbers) or a matrix
+		multiplication (if argument other involves matrices).
+		"""
+		if isinstance(other, SymbolicObject) or ismatrix(other):
+			return self.__matmul__(other)  # matrix multiplication
+		elif isinstance(other, (float, np.floating, int, np.integer, complex, np.complex_)):
+			new_opsum = {}
+			for op in self.opsum:
+				new_opsum[op] = other * self.opsum[op]
+			return SymbolicMatrix(new_opsum)
+		else:
+			raise ValueError("Arithmetic operation * only for two SymbolicMatrix objects or SymbolicMatrix and scalar")
+
+	def __rmul__(self, other):
+		"""SymbolicObject, matrix, or number times SymbolicMatrix
+		This multiplication involves concatenation of operators and either a
+		scalar multiplication (if argument other involves numbers) or a matrix
+		multiplication (if argument other involves matrices).
+		"""
+		if isinstance(other, SymbolicObject) or ismatrix(other):
+			return self.__rmatmul__(other)  # matrix multiplication
+		elif isinstance(other, (float, np.floating, int, np.integer, complex, np.complex_)):
+			new_opsum = {}
+			for op in self.opsum:
+				new_opsum[op] = other * self.opsum[op]
+			return SymbolicMatrix(new_opsum)
+		else:
+			raise ValueError("Arithmetic operation * only for two SymbolicMatrix objects or SymbolicMatrix and np.ndarray or SymbolicMatrix and scalar")
+
+	def __matmul__(self, other):   # right (forward) multiplication
+		"""SymbolicMatrix @ SymbolicObject or matrix (@ is matrix multiplication)
+		This multiplication involves concatenation of operators and a matrix
+		multiplication of the coefficients.
+
+		Note:
+		If the argument 'other' is a SymbolicObject that contains scalars as
+		coefficients in the operator sum, the multiplication is not a matrix
+		multiplication strictly speaking, but a separate implementation is not
+		required.
+		"""
+		opsump = {}  # 'p' = product
+		if isinstance(other, SymbolicObject):
+			for op1 in self.opsum:
+				for op2 in other.opsum:
+					op = op1 + op2  # concatenate strings
+					if op in opsump:
+						opsump[op] += (self.opsum[op1] * other.opsum[op2])
+					else:
+						opsump[op]  = (self.opsum[op1] * other.opsum[op2])
+		elif ismatrix(other):
+			for op in self.opsum:
+				opsump[op] = (self.opsum[op] @ other)
+		else:
+			raise ValueError("Arithmetic operation [matmul] only for two SymbolicMatrix objects or SymbolicMatrix and np.ndarray or SymbolicMatrix and SymbolicObject or SymbolicMatrix and scalar")
+
+		return SymbolicMatrix(opsump)
+
+	def __rmatmul__(self, other):  # left (reverse) multiplication
+		"""SymbolicObject or matrix @ SymbolicMatrix (@ is matrix multiplication)
+		This multiplication involves concatenation of operators and a matrix
+		multiplication of the coefficients.
+
+		Note:
+		If the argument 'other' is a SymbolicObject that contains scalars as
+		coefficients in the operator sum, the multiplication is not a matrix
+		multiplication strictly speaking, but a separate implementation is not
+		required.
+		"""
+		opsump = {}  # 'p' = product
+		if isinstance(other, SymbolicObject):
+			for op1 in self.opsum:
+				for op2 in other.opsum:
+					op = op2 + op1  # concatenate strings
+					if op in opsump:
+						opsump[op] += (other.opsum[op2] * self.opsum[op1])
+					else:
+						opsump[op]  = (other.opsum[op2] * self.opsum[op1])
+		elif ismatrix(other):
+			for op in self.opsum:
+				opsump[op] = (other @ self.opsum[op])
+		else:
+			raise ValueError("Arithmetic operation [matmul] only for two SymbolicMatrix objects or SymbolicMatrix and np.ndarray or SymbolicMatrix and SymbolicObject or SymbolicMatrix and scalar")
+
+		return SymbolicMatrix(opsump)
+
+	def __getitem__(self, arg):
+		"""Get entry (arg is tuple) or operator (arg is str)."""
+		if isinstance(arg, tuple) and len(arg) == 2:
+			return self.getentry(*arg)
+		elif isinstance(arg, str):
+			if arg in self.opsum:
+				return self.opsum[arg]
+			else:
+				raise KeyError
+		else:
+			raise IndexError("Index must be a pair or an operator string")
+
+	def getentry(self, i, j):
+		"""Get entry"""
+		if self.dim is None:
+			raise IndexError("Cannot take element in empty SymbolicMatrix")
+		if 0 > i >= -self.dim:
+			i = self.dim - i
+		if 0 > j >= -self.dim:
+			j = self.dim - j
+		if i < 0 or i >= self.dim or j < 0 or j >= self.dim:
+			raise IndexError("Index (%i, %i) out of bounds for SymbolicMatrix of dimension %i." % (i, j, self.dim))
+
+		new_opsum = {}
+		for op in self.opsum:
+			new_opsum[op] = self.opsum[op][i, j]
+		return SymbolicObject(new_opsum)
+
+	def bramidket(self, vec_v, vec_w):
+		"""Calculate the triple product <v|M|w> for all matrix coefficients M in the operator sum.
+
+		Arguments:
+		vec_v   Numpy array of one dimension and length equal to self.dim.
+		vec_w   Numpy array of one dimension and length equal to self.dim.
+
+		Returns:
+		A new SymbolicObject instance.
+		"""
+		new_opsum = {}
+		for op in self.opsum:
+			val = vec_v.conjugate() @ self.opsum[op] @ vec_w
+			if op in new_opsum:
+				new_opsum[op] += val
+			else:
+				new_opsum[op] = 1 * val
+
+		return SymbolicObject(new_opsum)
+
+	def conjugate(self):
+		"""Complex conjugation, i.e., + to -, - to +, and reversal of operator strings."""
+		new_opsum = {}
+		for op in self.opsum:
+			new_op = "".join(['+' if c == '-' else '-' for c in op[::-1]])
+			new_opsum[new_op] = self.opsum[op].conjugate().transpose()
+		return SymbolicMatrix(new_opsum)
+
+	def shift(self, k):
+		"""Shift momentum values by amount k."""
+		return SymbolicMatrix(super().shift(k).opsum)
+
+	def maxorder(self, maxord):
+		"""Concatenate to certain order and discard all higher order terms"""
+		return SymbolicMatrix({op: 1 * self.opsum[op] for op in self.opsum if len(op) <= maxord})
+
	def chop(self, value = 1e-10):
		"""Discard all terms with coefficients below the cutoff value.

		Real and imaginary parts are chopped independently: for each matrix
		element, the real (imaginary) part is set to zero if its absolute value
		is smaller than the cutoff.

		Argument:
		value   Float. Cutoff value.

		Returns:
		A new SymbolicMatrix instance.
		"""
		new_opsum = {}
		for op in self.opsum:
			if issparse(self.opsum[op]):
				new_opsum[op] = 1. * self.opsum[op]  # make copy
				# Sparse case: chop only the stored (nonzero) values, then
				# drop entries that have become exactly zero.
				z = 0 * self.opsum[op].data
				re = np.real(self.opsum[op].data)
				im = np.imag(self.opsum[op].data)
				new_opsum[op].data = np.where(np.abs(re) >= value, re, z) + 1j * np.where(np.abs(im) >= value, im, z)
				new_opsum[op].eliminate_zeros()
			else:
				# Dense case: chop elementwise over the full matrix.
				z = 0 * self.opsum[op]
				re = np.real(self.opsum[op])
				im = np.imag(self.opsum[op])
				new_opsum[op] = np.where(np.abs(re) >= value, re, z) + 1j * np.where(np.abs(im) >= value, im, z)
		return SymbolicMatrix(new_opsum)
+
+	def __setitem__(self, arg, value):
+		"""Set entry. (Set operator not implemented.)"""
+		if isinstance(arg, tuple) and len(arg) == 2:
+			self.setentry(arg[0], arg[1], value)
+		elif isinstance(arg, str):
+			raise NotImplementedError
+		else:
+			raise IndexError("Index must be a pair or an operator string")
+
+	def setentry(self, i, j, value):
+		"""Set entry"""
+		if self.dim is None:
+			raise IndexError("Cannot take element in empty SymbolicMatrix")
+		if 0 > i >= -self.dim:
+			i = self.dim - i
+		if 0 > j >= -self.dim:
+			j = self.dim - j
+		if i < 0 or i >= self.dim or j < 0 or j >= self.dim:
+			raise IndexError("Index (%i, %i) out of bounds for SymbolicMatrix of dimension %i." % (i, j, self.dim))
+		if isinstance(value, (float, np.floating, int, np.integer, complex, np.complex_)):
+			if "" not in self.opsum:
+				self.opsum[""] = np.zeros((self.dim, self.dim), dtype = complex)  # alternative: dtype = type(value)
+			self.opsum[""][i, j] = value
+		elif isinstance(value, SymbolicObject):
+			for op in value.opsum:
+				if op not in self.opsum:
+					self.opsum[op] = np.zeros((self.dim, self.dim), dtype = complex)  # alternative: dtype = type(value.opsum[op])
+				self.opsum[op][i, j] = value.opsum[op]
+		else:
+			raise ValueError("Entries must be scalars or SymbolicObjects")
+
	def applyphases(self, phases):
		"""Apply phase factors.
		Multiply each matrix element m, n with exp( i * (phi_m - phi_n)), where
		i is the imaginary unit and phi_j the elements of the argument phases.

		Argument:
		phases   Numpy array of one dimension and length equal to self.dim. This
		         vector contains the phases phi_j, in units or radians.

		Returns:
		A new SymbolicMatrix object.
		"""
		if phases is None:
			raise ValueError("Phases must not be given as None")
		if len(phases) != self.dim:
			raise ValueError("Number of elements in phases must match matrix dimensions")

		# Diagonal matrix of phase factors and its Hermitian conjugate, so
		# that P M P^dagger yields the element-wise factors exp(i(phi_m - phi_n)).
		phasefactors = np.exp(1.j * np.asarray(phases))
		phasemat = np.diag(phasefactors)
		phasematH = phasemat.conjugate().transpose()

		new_opsum = {}
		for op in self.opsum:
			# NOTE(review): '*' is matrix multiplication for np.matrix and for
			# scipy sparse coefficients, but elementwise for plain np.ndarray
			# coefficients — for the latter this would zero all off-diagonal
			# entries. Confirm the coefficient matrix type before relying on
			# this with dense ndarrays.
			new_opsum[op] = phasemat * (self.opsum[op] * phasematH)

		return SymbolicMatrix(new_opsum)
+
+	def shuffle(self, reordering):
+		"""Shuffle matrix elements (reorder basis).
+
+		Argument:
+		reordering   Numpy array or list of one dimension and length equal to
+		             self.dim. This array encodes the new basis order, i.e.
+		             reordering[new index] = old index.
+
+		Returns:
+		A new SymbolicMatrix object.
+		"""
+		reordering = np.asarray(reordering)
+		new_opsum = {}
+		for op in self.opsum:
+			new_opsum[op] = 1. * self.opsum[op][:, reordering][reordering]
+		return SymbolicMatrix(new_opsum)
+
+	def deriv(self, to):
+		"""Take derivative with respect to k+, k-, kx, or ky.
+
+		Argument:
+		to   '+', '-', 'x', or 'y'. Take derivative with respect to this k
+		     component.
+
+		Returns:
+		A new SymbolicMatrix instance.
+		"""
+		new_opsum = SymbolicObject.deriv(self, to).opsum
+		return SymbolicMatrix(new_opsum)
+
+	def delta_n(self, dn):
+		"""Take terms whose operators are of the degree dn, where k+ counts as +1 and k- as -1.
+
+		Argument:
+		dn   Integer.
+
+		Returns:
+		A new SymbolicMatrix instance.
+		"""
+		new_opsum = {}
+		for op in self.opsum:
+			if op.count("+") - op.count("-") == dn:
+				new_opsum[op] = 1 * self.opsum[op]
+		return SymbolicMatrix(new_opsum)
+
+	def ll_evaluate(self, m_and_n, magn, delta_n_vec, all_dof = False, add_matrix = None):
+		"""Evaluate an abstract operator product at Landau level n.
+
+		Arguments:
+		m_and_n      2-tuple or integer. If a 2-tuple, the LL indices m and n.
+		             If an integer, the two identical LL indices m = n and n.
+		magn         Float or Vector instance. Magnetic field. If a Vector
+		             instance, only the perpendicular component (bz) is
+		             considered.
+		delta_n_vec  List or array. For each orbital, the 'LL offset'. This is
+		             typically related to the value of Jz (total angular
+		             momentum quantum number).
+		all_dof      True or False. Whether to include 'unphysical' degrees of
+		             freedom for the lower LL indices. If False, reduce the
+		             matrix by eliminating all 'unphysical' degrees of freedom,
+		             which should be characterized by all zeros in the
+		             respective rows and columns. If set to True, then keep
+		             everything, and preserve the shape of the matrix.
+		add_matrix   Numpy array (2-dim). Add a 'constant' contribution at the
+		             end. This is used for terms that depend on the magnetic
+		             field, but not through momentum, for example the Zeeman and
+		             exchange terms.
+
+		Returns:
+		A matrix. This may be a 2-dim numpy array (dense matrix) or a scipy
+		sparse matrix.
+		"""
+		if isinstance(m_and_n, int):
+			m = m_and_n
+			n = m_and_n
+		elif isinstance(m_and_n, tuple) and len(m_and_n) == 2 and isinstance(m_and_n[0], (int, np.integer)) and isinstance(m_and_n[1], (int, np.integer)):
+			m, n = m_and_n
+		else:
+			raise TypeError("Index must be an integer or a two-tuple of integers")
+
+		eB = eoverhbar * magn if isinstance(magn, (float, np.floating, int, np.integer)) else eoverhbar * magn.z()
+
+		# Data sizes
+		norb = len(delta_n_vec)
+		if not self.dim % norb == 0:
+			raise ValueError
+		nynz = self.dim // norb
+
+		# Calculate the difference of the LL index <m_i| and |n_j> of the
+		# orbitals, i.e., B_ij = m_i - n_j.
+		# The matrix size is norbitals x norbitals.
+		# Note that delta_n_mat is the same regardless of sign(eB)
+		delta_n_vec0 = delta_n_ll(norb, 1.0)
+		mm, nn = np.meshgrid(m + delta_n_vec0, n + delta_n_vec0, indexing = 'ij')
+		delta_n_mat = mm - nn
+
+		if self.opsum == {}:
+			return 0.0
+
+		# Initialization
+		opsum0 = self.opsum[""] if "" in self.opsum else list(self.opsum.values())[0]
+		if issparse(opsum0):
+			# Instead of csc_matrix, the matrix types dok_matrix
+			# or coo_matrix provide a marginably better performance.
+			# The lil_matrix type, as suggested by scipy's
+			# SparseEfficiencyWarning is significantly slower. See
+			# the comments in spmatrix_broadcast, too.
+			result = csc_matrix(opsum0.shape, dtype=complex)
+		elif isinstance(opsum0, np.ndarray):
+			result = np.zeros_like(opsum0)
+		else:
+			raise TypeError
+
+		for op in self.opsum:
+			# Construct operator matrix O_ij = <m_i|op|n_j>, where i, j label
+			# the orbitals. Its size is norbitals x norbitals.
+			op_delta = op.count('+') - op.count('-')
+			opmat = np.where(delta_n_mat == op_delta, np.ones((norb, norb)), np.zeros((norb, norb)))
+			opcolval = np.array([op_eval_ll(op, n + dn, eB) for dn in delta_n_vec])
+			opmat *= opcolval[np.newaxis, :]
+			# Multiply the value (matrix M_op) with the operator matrix.
+			# "Broadcast" the operator matrix over the full matrix M_op, which
+			# has size (N norbitals) x (N norbitals).
+			result += spmatrix_broadcast(self.opsum[op], opmat)
+
+		# Add a 'constant' contribution
+		# This is used for terms that depend on the magnetic field, but not
+		# through momentum. Examples: The Zeeman and exchange terms.
+		if add_matrix is not None:
+			opmat = np.where(delta_n_mat == 0, np.ones((norb, norb)), np.zeros((norb, norb)))
+			result += spmatrix_broadcast(add_matrix, opmat)
+
+		# If all_dof is False, reduce the matrix by eliminating
+		# all 'unphysical' degrees of freedom, which should be
+		# characterized by all zeros in the respective rows and
+		# columns. If set to True, then keep everything, and
+		# preserve the shape of the matrix.
+		if all_dof:
+			if issparse(opsum0):
+				return result.tocsc()
+			else:
+				return result
+		else:
+			sel = np.reshape(np.arange(0, self.dim), (nynz, norb))
+			allselm = sel[:, (delta_n_vec + m >= 0)].flatten()
+			allseln = sel[:, (delta_n_vec + n >= 0)].flatten()
+
+			if issparse(opsum0):
+				return result[allselm, :][:, allseln].tocsc()
+			else:
+				return result[allselm, :][:, allseln]
+
class MagneticHamiltonian:
	"""Callable wrapper binding a Hamiltonian constructor to fixed momentum, arguments, and keywords.

	Calling the instance with a magnetic field value b returns
	h_constructor(k, b, *args, **kwds).

	Attributes:
	h_constructor   Callable that builds the Hamiltonian.
	k               Momentum value passed as first argument.
	args            Extra positional arguments for h_constructor.
	kwds            Extra keyword arguments for h_constructor.
	"""
	def __init__(self, h_constructor, k, args, kwds = None):
		self.h_constructor = h_constructor
		self.k = k
		self.args = args
		self.kwds = kwds if kwds is not None else {}

	def __call__(self, b):
		"""Evaluate the wrapped constructor at magnetic field b."""
		return self.h_constructor(self.k, b, *self.args, **self.kwds)
+
class SymbolicHamiltonian(SymbolicMatrix):
	"""Container class for a symbolic Hamiltonian, a matrix valued operator sum, which can be evaluated later.
	This class is derived from SymbolicMatrix which is in turn derived from
	SymbolicObject.

	Attributes:
	opsum   Dict instance, whose keys are strings containing + and -, which
	        encode the operators k+ and k-. The values are the coefficients,
	        matrix-valued in this case.
	dim     Integer. Size of the square matrices.
	hmagn   MagneticHamiltonian instance or None. The part of the Hamiltonian
	        that depends on magnetic field, but not originating directly from
	        momentum. For example, Zeeman and exchange terms.
	"""
	def __init__(self, h_constructor, args, kwds = None, dk = 1.0, exclude_zero = False, hmagn = False, b0 = None, kx = 0.0, ky = 0.0, kz = 0.0):
		"""Build the symbolic Hamiltonian from finite differences of h_constructor.

		Arguments:
		h_constructor  Callable. Called as h_constructor([kx, ky, kz], b, *args,
		               **kwds); expected to return a matrix.
		args           Tuple or list. Positional arguments for h_constructor.
		kwds           Dict or None. Keyword arguments for h_constructor. The
		               caller's dict is not modified.
		dk             Float. Step size of the finite-difference derivatives.
		exclude_zero   True or False. If True, omit the momentum-independent
		               (zeroth order) term from the operator sum.
		hmagn          True or False. If True, wrap the magnetic-field part as
		               a MagneticHamiltonian in self.hmagn.
		b0             Float or None. Magnetic field at which the derivatives
		               are evaluated. Mutually exclusive with hmagn.
		kx, ky, kz     Floats. Momentum at which the derivatives are evaluated.
		"""
		## Default value (kwds); make a copy so that inserting 'lattice_reg'
		## below does not mutate the caller's dict.
		kwds = {} if kwds is None else dict(kwds)
		kwds['lattice_reg'] = False  # Disable lattice regularization

		## Apply magnetic field b0. Note that setting b0 to a nonzero value does
		## not have the same effect as the inclusion of the magnetic Hamiltonian
		## self.hmagn and evaluating at b0.
		if b0 is not None and hmagn:
			raise ValueError("Treating magnetic field interaction both via b0 and as MagneticHamiltonian (hmagn = true) is not supported")
		b0 = 0.0 if b0 is None else b0

		# Evaluate the Hamiltonian on the finite-difference stencil. Each
		# stencil point is constructed once and reused for first and second
		# derivatives (the constructor is assumed to be deterministic).
		h0 = h_constructor([kx, ky, kz], b0, *args, **kwds)
		hxp = h_constructor([kx + dk, ky, kz], b0, *args, **kwds)
		hxm = h_constructor([kx - dk, ky, kz], b0, *args, **kwds)
		hyp = h_constructor([kx, ky + dk, kz], b0, *args, **kwds)
		hym = h_constructor([kx, ky - dk, kz], b0, *args, **kwds)
		# First derivatives (central differences)
		hkx = (hxp - hxm) / 2 / dk
		hky = (hyp - hym) / 2 / dk
		hkp = 0.5 * hkx - 0.5j * hky
		hkm = 0.5 * hkx + 0.5j * hky
		# Second derivatives (central differences)
		hkxkx = (hxp - 2 * h0 + hxm) / dk / dk
		hkyky = (hyp - 2 * h0 + hym) / dk / dk
		hkxky = (h_constructor([kx + dk, ky + dk, kz], b0, *args, **kwds) - h_constructor([kx + dk, ky - dk, kz], b0, *args, **kwds) - h_constructor([kx - dk, ky + dk, kz], b0, *args, **kwds) + h_constructor([kx - dk, ky - dk, kz], b0, *args, **kwds)) / 4 / dk / dk
		hkpkp = 0.25 * (hkxkx - hkyky - 2.j * hkxky)
		hkmkm = 0.25 * (hkxkx - hkyky + 2.j * hkxky)
		hkpkm = 0.25 * (hkxkx + hkyky)

		self.opsum = {"+": hkp, "-": hkm, "++": 0.5 * hkpkp, "--": 0.5 * hkmkm, "+-": 0.5 * hkpkm, "-+": 0.5 * hkpkm}
		if hmagn:
			self.hmagn = MagneticHamiltonian(h_constructor, [kx, ky], args, kwds)
		elif not exclude_zero:
			self.opsum[""] = h0
			self.hmagn = None
		else:
			self.hmagn = None
		self.dim = h0.shape[0]

	def evaluate(self, k, eB):
		"""Evaluate an abstract operator product.
		Take into account nonzero commutator between k+ and k-. Split between
		magnetic and nonmagnetic Hamiltonian parts.

		Arguments:
		k    Vector instance or 2-tuple. The vector value (kx, ky). For Vector
			 instances, nonzero kz values are ignored.
		eB   Vector or float. Magnetic field in z direction, times e / hbar.

		Returns:
		A matrix.
		"""
		hmagn = 0.0 if self.hmagn is None else self.hmagn(eB / eoverhbar)  # eB = eoverhbar * magn
		hrest = sum([self.opsum[op] * op_eval(op, k, eB) for op in self.opsum])
		total = hmagn + hrest
		# Mixed sums of sparse matrices and dense arrays evaluate to a
		# numpy.matrix. We thus explicitly cast dense results to numpy.array.
		return total if issparse(total) else np.array(total)

	def ll_evaluate(self, m_and_n, magn, delta_n_vec, all_dof = False, add_matrix = None):
		"""Evaluate the SymbolicHamiltonian instance product at Landau level n.

		Arguments:
		m_and_n      2-tuple or integer. If a 2-tuple, the LL indices m and n.
		             If an integer, the two identical LL indices m = n and n.
		magn         Float or Vector instance. Magnetic field. If a Vector
		             instance, only the perpendicular component (bz) is
		             considered.
		delta_n_vec  List or array. For each orbital, the 'LL offset'. This is
		             typically related to the value of Jz (total angular
		             momentum quantum number).
		all_dof      True or False. Whether to include 'unphysical' degrees of
		             freedom for the lower LL indices. If False, reduce the
		             matrix by eliminating all 'unphysical' degrees of freedom,
		             which should be characterized by all zeros in the
		             respective rows and columns. If set to True, then keep
		             everything, and preserve the shape of the matrix.
		add_matrix   None. (Placeholder; the magnetic part self.hmagn is
		             inserted here automatically.)

		Returns:
		A matrix. This may be a 2-dim numpy array (dense matrix) or a scipy
		sparse matrix.
		"""
		if add_matrix is not None:
			raise ValueError("Argument add_matrix must be None.")
		add_matrix = None if self.hmagn is None else self.hmagn(magn)
		return SymbolicMatrix.ll_evaluate(self, m_and_n, magn, delta_n_vec, all_dof = all_dof, add_matrix = add_matrix)

	def hper1(self, avec):
		"""Get first order perturbation in Löwdin partitioning.

		Argument:
		avec   Matrix or 2-dimensional array, whose columns are the eigenvectors
		       of the 'A' bands.

		Returns:
		A matrix (nested list of SymbolicObject entries).

		Note:
		See bhz.py for more information.
		"""
		na = len(avec)
		hper1 = [[0 for j in range(0, na)] for i in range(0, na)]
		for j in range(0, na):
			for i in range(0, na):
				hper1[i][j] = self.bramidket(avec[i], avec[j])
		return hper1

	def hper2(self, e_a, e_b, avec, bvec, verbose = False):
		"""Get second order perturbation in Löwdin partitioning.

		Arguments:
		e_a    List or 1-dimensional array. The eigenvalues of the 'A' bands.
		e_b    List or 1-dimensional array. The eigenvalues of the 'B' bands.
		avec   Matrix or 2-dimensional array, whose columns are the eigenvectors
		       of the 'A' bands.
		bvec   Matrix or 2-dimensional array, whose columns are the eigenvectors
		       of the 'B' bands.
		verbose  True or False. If True, print diagnostic information on the
		         non-negligible overlaps to stdout.

		Returns:
		A matrix (nested list of SymbolicObject entries).

		Note:
		See bhz.py for more information.
		"""
		na = len(e_a)
		nb = len(e_b)
		hper2 = [[SymbolicObject(0) for j in range(0, na)] for i in range(0, na)]
		for j in range(0, na):
			for i in range(0, na):
				rec_e = reciprocal_energies(e_a[i], e_a[j], e_b)

				for l in range(0, nb):
					hper2[i][j] += (self.bramidket(avec[i], bvec[l]) * self.bramidket(bvec[l], avec[j]) * 0.5 * rec_e[l])
					if verbose:  # diagnostic display
						overlap1 = self.bramidket(avec[i], bvec[l]).chop(1e-7)
						overlap2 = self.bramidket(bvec[l], avec[j]).chop(1e-7)
						if (not overlap1.iszero(1e-7)) or (not overlap2.iszero(1e-7)):
							print("(%i, %i) [%8.3f] |  %2i [%8.3f]:" % (i, j, e_a[i], l, e_b[l]), rec_e[l])
							print(overlap1)
							print(overlap2)
							print((overlap1 * overlap2).chop(1e-7))
							print()

		return hper2
diff --git a/kdotpy-v1.0.0/src/kdotpy/symmetry.py b/kdotpy-v1.0.0/src/kdotpy/symmetry.py
new file mode 100644
index 0000000000000000000000000000000000000000..55d8627a4d41819727d900509d6f6e3298fee257
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/symmetry.py
@@ -0,0 +1,280 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import re
+
+from .momentum import VectorTransformation, get_vectortransformation
+
def identify_group_by_symmetries(symm):
	"""Identify group from list of symmetries.

	Argument:
	symm    List of strings or VectorTransformation instances. A list of
	        transformations under which the system is symmetric.

	Note:
	It is acceptable to omit 'implied' symmetries. For example, if '4(z)(+)' is
	a symmetry, its inverse '4(z)(-)' need not be included in the argument symm.

	Returns:
	String. The Schönflies label of the group in ASCII, i.e., subscripts written
	as normal text.
	"""
	if not isinstance(symm, list) or not all([isinstance(s, (str, VectorTransformation)) for s in symm]):
		raise TypeError("Argument symm must be a list of strings or VectorTransformation instances.")

	# Normalize the input transformations into a list of 'symmetry classes':
	# equivalent axes (e.g. the diagonal mirrors m(x+y), m(x-y), ...) are
	# collapsed onto a single canonical label such as 'm(x+y)' or '3(a)'.
	symm_classes = []
	for s1 in symm:
		s = s1 if isinstance(s1, str) else s1.name
		if s in ['1', 'i']:
			symm_classes.append(s)
		if re.match(r"m\([xyz][+-][xyz]\)", s) is not None and 'm(x+y)' not in symm_classes:
			symm_classes.append('m(x+y)')
		if re.match(r"2\([xyz][+-][xyz]\)", s) is not None and '2(x+y)' not in symm_classes:
			symm_classes.append('2(x+y)')
		if re.match(r"[m2]\([xyz]\)", s) is not None and s not in symm_classes:
			symm_classes.append(s)
		if s in ['m(t)', 'm(v)'] and 'm(t)' not in symm_classes:
			symm_classes.append('m(t)')
		if s in ['m(u)', 'm(w)'] and 'm(u)' not in symm_classes:
			symm_classes.append('m(u)')
		if s in ['2(t)', '2(v)'] and '2(t)' not in symm_classes:
			symm_classes.append('2(t)')
		if s in ['2(u)', '2(w)'] and '2(u)' not in symm_classes:
			symm_classes.append('2(u)')
		if re.match(r"3\([abcd]\)", s) is not None and '3(a)' not in symm_classes:
			symm_classes.append('3(a)')
		if re.match(r"-3\([abcd]\)", s) is not None and '-3(a)' not in symm_classes:
			symm_classes.append('-3(a)')
		if re.match(r"-?[346]\(z\)", s) and s not in symm_classes:
			symm_classes.append(s)

	# Decide the point group by testing the characteristic symmetry classes in
	# order of decreasing specificity. The order of these tests matters.
	if 'i' in symm_classes:  # inversion symmetric
		if '3(a)' in symm_classes:  # cubic groups or threefold subgroups [-3(a) implied]
			if '4(z)' in symm_classes:
				return 'Oh'
			elif 'm(z)' in symm_classes or '2(z)' in symm_classes:
				return 'Th'
			elif 'm(x+y)' in symm_classes:
				return 'D3d'
			else:
				return 'C3i'
		if '3(z)' in symm_classes:  # threefold groups with z axis
			if 'm(x)' in symm_classes and 'm(y)' in symm_classes and 'm(z)' in symm_classes:
				return 'D6h'
			elif 'm(x)' in symm_classes and 'm(t)' in symm_classes:
				return 'D3d'
			elif 'm(y)' in symm_classes and 'm(u)' in symm_classes:
				return 'D3d'  # different orientation
			elif 'm(z)' in symm_classes:
				return 'C6h'
			else:
				return 'C3i'
		if '4(z)' in symm_classes and 'm(z)' in symm_classes:  # contains 4/m
			if 'm(x+y)' in symm_classes or 'm(x)' in symm_classes:
				return 'D4h'
			else:
				return 'C4h'
		for ax in ['z', 'x', 'y', 'x+y']:
			if ('2(%s)' % ax) in symm_classes and ('m(%s)' % ax) in symm_classes:  # contains 2/m (any axis)
				# The 'else' below belongs to the inner 'for' (for-else): since
				# the loop body never breaks, it runs exactly when no second
				# 2/m set was found, falling back to C2h.
				# NOTE(review): the inner condition re-tests 'm(%s)' % ax
				# (already known true) rather than 'm(%s)' % ax2 — presumably
				# m(ax2) is implied by inversion together with 2(ax2); confirm
				# this is intended.
				for ax2 in ['z', 'x', 'y', 'x+y']:
					if ax2 != ax and ('2(%s)' % ax2) in symm_classes and ('m(%s)' % ax) in symm_classes:
						return 'D2h'  # at least two sets of 2/m
				else:
					return 'C2h'  # one set of 2/m
		for ax in ['z', 'x', 'y', 'x+y', 't', 'u']:
			if ('2(%s)' % ax) in symm_classes:
				return 'C2h'
		return 'Ci'

	else:  # inversion asymmetric
		if '3(a)' in symm_classes:  # cubic groups or threefold subgroups [-3(a) not possible without inversion]
			if '-4(z)' in symm_classes:
				return 'Td'
			elif '4(z)' in symm_classes:
				return 'O'
			elif '2(z)' in symm_classes and '2(x+y)' in symm_classes:
				return 'T'
			elif '2(x+y)' in symm_classes:
				return 'D3'
			elif 'm(x+y)' in symm_classes:
				return 'C3v'
			else:
				return 'C3'
		if '3(z)' in symm_classes:  # threefold groups with z axis
			if '2(z)' in symm_classes:
				if '2(x)' in symm_classes and '2(y)' in symm_classes:
					return 'D6'
				elif 'm(x)' in symm_classes and 'm(y)' in symm_classes:
					return 'C6v'
				else:
					return 'C6'
			elif 'm(z)' in symm_classes:
				if '2(x)' in symm_classes and 'm(y)' in symm_classes:
					return 'D3h'
				elif 'm(x)' in symm_classes and '2(y)' in symm_classes:
					return 'D3h'  # different orientation
				else:
					return 'C3h'
			else:
				if 'm(x)' in symm_classes and 'm(t)' in symm_classes:
					return 'C3v'
				elif 'm(y)' in symm_classes and 'm(u)' in symm_classes:
					return 'C3v'  # different orientation
				elif 'm(x+y)' in symm_classes:
					return 'C3v'  # different orientation
				elif '2(x)' in symm_classes:
					return 'D3'
				elif '2(y)' in symm_classes:
					return 'D3'  # different orientation
				else:
					return 'C3'
		if '-4(z)' in symm_classes:  # contains -4
			if 'm(x+y)' in symm_classes and '2(x)' in symm_classes:
				return 'D2d'
			elif '2(x+y)' in symm_classes and 'm(x)' in symm_classes:
				return 'D2d'  # different orientation of D2d
			else:
				return 'S4'
		if '4(z)' in symm_classes:  # contains 4
			if 'm(x+y)' in symm_classes and 'm(x)' in symm_classes:
				return 'C4v'
			elif '2(x+y)' in symm_classes and '2(x)' in symm_classes:
				return 'D4'
			else:
				return 'C4'
		# Twofold groups: test the three principal axes z, y, x in turn.
		if '2(z)' in symm_classes:
			if '2(x)' in symm_classes and '2(y)' in symm_classes:
				return 'D2'
			elif 'm(x)' in symm_classes and 'm(y)' in symm_classes:
				return 'C2v'
			elif '2(x+y)' in symm_classes:
				return 'D2'  # different orientation
			elif 'm(x+y)' in symm_classes:
				return 'C2v'  # different orientation
			else:
				return 'C2'
		if '2(y)' in symm_classes:
			if '2(x)' in symm_classes and '2(z)' in symm_classes:
				return 'D2'
			elif 'm(x)' in symm_classes and 'm(z)' in symm_classes:
				return 'C2v'
			elif '2(x+y)' in symm_classes:
				return 'D2'  # different orientation
			elif 'm(x+y)' in symm_classes:
				return 'C2v'  # different orientation
			else:
				return 'C2'
		if '2(x)' in symm_classes:
			if '2(y)' in symm_classes and '2(z)' in symm_classes:
				return 'D2'
			elif 'm(y)' in symm_classes and 'm(z)' in symm_classes:
				return 'C2v'
			elif '2(x+y)' in symm_classes:
				return 'D2'  # different orientation
			elif 'm(x+y)' in symm_classes:
				return 'C2v'  # different orientation
			else:
				return 'C2'
		# Lowest symmetry: a single twofold axis (C2), a single mirror (Cs),
		# or no symmetry at all (C1).
		for ax in ['z', 'x', 'y', 'x+y', 't', 'u']:
			if ('2(%s)' % ax) in symm_classes:
				return 'C2'
			if ('m(%s)' % ax) in symm_classes:
				return 'Cs'
		return 'C1'
+
def analyze(data):
	"""Do symmetry analysis.

	Argument:
	data   DiagData instance.

	Output:
	List of symmetries and label of symmetry group written to stdout.

	No return value.
	"""
	# TODO: Options, e.g. observables True/False, ...
	all_tfm = get_vectortransformation('all')
	verbose = 'verbose' in sys.argv
	symmetric_tfms = []
	spin_axial_tfms = ['1']
	obs_reps = {}
	spin_obs_reps = {}
	n_total = len(all_tfm)
	for count, vt in enumerate(all_tfm, start = 1):
		name = vt if isinstance(vt, str) else vt.name
		print()
		print("Transformation %s" % name)
		symm_result, obs_result = data.symmetry_test(vt, observables = True, verbose = verbose)
		if symm_result:
			symmetric_tfms.append(name)
			if obs_result:
				# Intersect the representation candidates of each observable
				# with those found under the previous transformations.
				if not obs_reps:
					obs_reps = {obs: reps for obs, reps in obs_result.items()}
				else:
					for obs, reps in obs_result.items():
						obs_reps[obs] = [rep for rep in obs_reps[obs] if rep in reps]
				# Spin is axial if s(x,y,z) transforms like T1g.
				if 'T1g' in obs_result.get('s(x,y,z)', []):
					spin_axial_tfms.append(name)
					if not spin_obs_reps:
						spin_obs_reps = {obs: reps for obs, reps in obs_result.items()}
					else:
						for obs, reps in obs_result.items():
							spin_obs_reps[obs] = [rep for rep in spin_obs_reps[obs] if rep in reps]
		# Progress indicator on stderr
		sys.stderr.write("%i / %i\n" % (count, n_total))
		sys.stderr.flush()
	print("Eigenvalues symmetric under:", ", ".join(symmetric_tfms))
	print("Group:", identify_group_by_symmetries(symmetric_tfms))
	print()
	print("Spin states axial under:", ", ".join(spin_axial_tfms))
	print("Group:", identify_group_by_symmetries(spin_axial_tfms))
	print()
	# Prefer the spin-restricted representation data if available.
	if spin_obs_reps:
		chosen_reps = spin_obs_reps
	elif obs_reps:
		chosen_reps = obs_reps
	else:
		chosen_reps = None
	if chosen_reps is not None:
		print("Observables representations (Oh notation):")
		for obs in sorted(chosen_reps):
			print("%-10s:" % obs, "???" if len(chosen_reps[obs]) == 0 else ", ".join(chosen_reps[obs]))
		print()
	sys.stderr.write("Warning (symmetry.analyze): Symmetry analysis is an experimental feature. Results may depend sensitively on input parameters, such as the 'split' option and the definition of the momentum grid.\n")
	return
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/__init__.py b/kdotpy-v1.0.0/src/kdotpy/tableo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..066544a86b8770959c83b5d7260ba6d8d65f2f34
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/__init__.py
@@ -0,0 +1,61 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+# in auxil.py:
+from .auxil import q_z, potential, extrema, transitions
+
+# in disp.py:
+from .disp import disp_byband, disp
+
+# in dos.py
+from .dos import local_density, dos_idos, dos_byband, energy_at_density, densityz
+
+# in wf.py
+from .wf import wavefunction_z, abs_wavefunctions_z, abs_wavefunctions_y, wavefunction_zy
+
+# in simple.py:
+from .simple import simple, simple2d
+
+# in read.py
+from .read import read_csv
+from .read import read_csv_dict as read_dict
+
+# in tools.py
+from .tools import get_label_unit_style, get_precision
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/auxil.py b/kdotpy-v1.0.0/src/kdotpy/tableo/auxil.py
new file mode 100644
index 0000000000000000000000000000000000000000..af6520174419c1b6dc08d70caf02cd2202aca85a
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/auxil.py
@@ -0,0 +1,419 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+from ..physconst import eoverhbar
+from ..momentum import Vector
+from ..config import get_config, get_config_int
+from .tools import get_format, format_quantity_and_unit
+from .write import write
+from .simple import simple
+
+### HELPER FUNCTIONS ###
def get_bandsextrema_vector(bands_extrema):
	"""Return the momentum vector of the first available extremum.

	Useful, e.g., for determining the dimension and vector type of the
	extrema positions.

	Argument:
	bands_extrema  Dict instance whose values are lists of BandExtremum
	               instances.

	Returns:
	A Vector instance, the position of the first extremum found.

	Raises:
	TypeError   If the stored position is not a Vector instance.
	ValueError  If all lists of extrema are empty.
	"""
	for extrema_list in bands_extrema.values():
		if not extrema_list:
			continue
		kval = extrema_list[0].k  # position of the first extremum of this band
		if not isinstance(kval, Vector):
			raise TypeError("Invalid type for extrema position")
		return kval
	raise ValueError("No extrema are defined")
+
def get_bandsextrema_massdim(bands_extrema):
	"""Return the number of mass components stored per band extremum.

	Inspects the first extremum of the first non-empty band: a plain float
	counts as one component, otherwise the length of the mass value is used.
	Defaults to 1 if no extrema are present.

	Argument:
	bands_extrema  Dict instance whose values are lists of BandExtremum
	               instances.

	Returns:
	Integer. The mass dimensionality.
	"""
	for extrema_list in bands_extrema.values():
		if not extrema_list:
			continue
		mass = extrema_list[0].mass  # representative mass value
		return 1 if isinstance(mass, float) else len(mass)
	return 1
+
def get_tableextrema_quantities(bands_extrema):
	"""Determine the column ids for the table of band extrema.

	Index columns (llindex, bindex, char) are included only if the respective
	attribute is set on at least one band's first extremum. These are followed
	by the min/max marker, the momentum components, the energy, and one or
	more mass columns.

	Arguments:
	bands_extrema  Dict instance, whose keys are band labels and whose values
	               are lists of BandExtremum instances.

	Returns:
	quantities     List of strings. The column ids.
	"""
	has_bindex, has_llindex, has_char = False, False, False
	# Probe the first extremum of each band for optional attributes
	for extrema_list in bands_extrema.values():
		if not extrema_list:
			continue
		first = extrema_list[0]
		has_bindex = has_bindex or (first.bindex is not None)
		has_llindex = has_llindex or (first.llindex is not None)
		has_char = has_char or (first.char is not None)

	quantities = []
	if has_llindex:
		quantities.append("llindex")
	if has_bindex:
		quantities.append("bindex")
	if has_char:
		quantities.append("char")
	quantities.append("minmax")

	# Momentum components, taken from a representative extremum position
	quantities.extend(get_bandsextrema_vector(bands_extrema).components('k'))
	quantities.append("E")

	# Single 'mass' column or numbered columns mass1, mass2, ...
	massdim = get_bandsextrema_massdim(bands_extrema)
	if massdim == 1:
		quantities.append("mass")
	else:
		quantities.extend(f"mass{i + 1}" for i in range(massdim))
	return quantities
+
def iter_extrema(bands_extrema):
	"""Flat iterator over all band extrema.

	Bands are visited in sorted order of their labels; within each band, the
	extrema are yielded by increasing momentum magnitude.

	Argument:
	bands_extrema   A dict instance whose values are lists of BandExtremum
	                instances.

	Yields:
	BandExtremum instances.
	"""
	for _, extrema_list in sorted(bands_extrema.items()):
		if not extrema_list:
			continue
		# Order by distance from zero momentum
		lengths = [ex.k.len() for ex in extrema_list]
		for idx in np.argsort(lengths):
			yield extrema_list[idx]
+
+### CONSTRUCTION FUNCTIONS ###
+
def extrema(filename, bands_extrema, float_precision = 5, angle_degrees = True):
	"""Table of band extrema.

	Arguments:
	filename         String. The output file name.
	bands_extrema    Dict instance, whose keys are band labels and whose values
	                 are lists of BandExtremum instances.
	float_precision  Integer or None. Number of digits for floating point
	                 numbers. If None, use the configuration setting
	                 'table_extrema_precision'.
	angle_degrees    True or False. Whether the angular units are degrees (True)
	                 or radians (False). Forced to False if the momentum vectors
	                 themselves are not stored in degrees.

	No return value.
	"""
	quantities = get_tableextrema_quantities(bands_extrema)
	k0 = get_bandsextrema_vector(bands_extrema)
	# Use degrees only if both requested and the vectors are stored in degrees
	if isinstance(k0, Vector):
		angle_degrees = (angle_degrees and k0.degrees)
	if float_precision is None:
		float_precision = get_config_int('table_extrema_precision', minval=2)
	if float_precision < 3:
		sys.stderr.write("Warning (tableo.extrema): Precision (option 'table_extrema_precision') must be at least 2, ideally >= 3.\n")

	# Gather one list of values per column, iterating extrema in a flat order
	all_data = {c: [] for c in quantities}
	for ex in iter_extrema(bands_extrema):
		if 'llindex' in quantities:
			all_data['llindex'].append(ex.llindex)
		if 'bindex' in quantities:
			all_data['bindex'].append(ex.bindex)
		if 'char' in quantities:
			all_data['char'].append("" if ex.char is None else ex.char)
		# Convert the momentum to the same vector type as the reference k0
		k = ex.k.astype(k0.vtype)
		all_data['minmax'].append(ex.minmax)
		for co, val in k.to_dict(prefix = 'k').items():
			all_data[co].append(val)
		all_data['E'].append(ex.energy)
		# Scalar mass column: take the first component if mass is a sequence
		if 'mass' in quantities:
			all_data['mass'].append(ex.mass if isinstance(ex.mass, (float, np.floating)) else ex.mass[0])
		# Multi-component mass columns (mass1, mass2, ...)
		if isinstance(ex.mass, tuple):
			for i, mi in enumerate(ex.mass):
				if f'mass{i + 1}' in quantities:
					all_data[f'mass{i + 1}'].append(mi)

	# Per-column number formats and formatted headers (label + unit)
	formats = [get_format(q, float_precision, degrees=angle_degrees) for q in quantities]
	columns = []
	units = []
	for q in quantities:
		colstr, ustr = format_quantity_and_unit(q, degrees=angle_degrees)
		columns.append(colstr)
		units.append(ustr)

	write(filename, all_data, formats, columns=columns, units=units)
	return
+
def q_z(filename, params, qty, clabel = None, units = None, precision = None):
	"""Table of Q(z), i.e., quantities as function of z.
	This provides a generic table with z value in the first column, and one or
	more data columns with z-dependent quantities.

	Arguments:
	filename    String. The output file name.
	params      PhysParams instance. Used to extract the z values.
	qty         List or array of dimension 1 or 2. If 1-dimensional, output a
	            single data column. If 2-dimensional, output multiple data
	            columns. The data needs to be ordered in rows, i.e., it should
	            be of the form [[q1(z), ...], [q2(z), ...], ...]. A list of
	            strings is also accepted; each string is then looked up as a
	            z-dependent parameter via params.z().
	clabel      String or list of strings. Labels for the data columns.
	units       String, list of strings or None. Units associated to the
	            columns. If a single string, use the same unit for all data
	            columns (not the column for z). If None, do not output units.
	precision   Integer or None. Number of digits for floating point numbers. If
	            None, use the configuration setting 'table_qz_precision'.

	No return value.
	"""
	if precision is None:
		precision = get_config_int('table_qz_precision', minval = 2)
	if precision < 3:
		sys.stderr.write("Warning (tableo.q_z): Precision (option 'table_qz_precision') must be at least 2, ideally >= 3.\n")

	nz = params.nz
	z = params.zvalues_nm()
	# Scalar types accepted as data values. Use np.complexfloating here:
	# np.complex_ was removed in NumPy 2.0 and would raise AttributeError.
	scalar_types = (float, np.floating, int, np.integer, complex, np.complexfloating)
	# Normalize qty into a 2-dim array qz with one row per data column
	if isinstance(qty, list):
		if len(qty) == 0:
			return
		elif len(qty) == nz and isinstance(qty[0], scalar_types):
			qz = np.array([qty])  # single data column
		elif isinstance(qty[0], (list, np.ndarray)) and len(qty[0]) == nz:
			qz = np.array(qty)  # multiple data columns
		elif isinstance(qty[0], str):
			# Look up each label as a z-dependent parameter; labels that cannot
			# be evaluated are skipped (deliberate best effort).
			qz = []
			for q in qty:
				try:
					qz.append([params.z(z1)[q] for z1 in range(0, nz)])  # not very efficient, but it will work
				except Exception:
					pass
			qz = np.array(qz)
		else:
			sys.stderr.write("ERROR (tableo.q_z): Input list has invalid shape.\n")
			return
	elif isinstance(qty, np.ndarray):
		qsh = qty.shape
		if len(qsh) == 1 and qsh[0] == nz:
			qz = np.array([qty])
		elif len(qsh) == 2 and qsh[1] == nz:
			qz = np.array(qty)
		else:
			sys.stderr.write("ERROR (tableo.q_z): Input array has invalid shape.\n")
			return
	else:
		sys.stderr.write("ERROR (tableo.q_z): Input must be array or list.\n")
		return

	if len(qz) == 0:
		sys.stderr.write("Warning (table_q_z): Nothing to be written.\n")
		return

	# Prepend the z coordinates as the first row (first column in the file)
	qz = np.concatenate((np.array([z]), qz))

	## Determine column headers and units (also part of y axis label).
	## If not specified, try to do it automatically.
	if clabel is None:
		if isinstance(qty, list) and isinstance(qty[0], str):
			columns = ["z"] + ["%s" % q for q in qty]
		else:
			columns = ["z"] + ["q%i" % i for i in range(1, len(qz))]
			sys.stderr.write("Warning (tableo.q_z): Column headings could not be determined automatically.\n")
	elif isinstance(clabel, str) and len(qz) == 2:
		columns = ["z", clabel]
	elif isinstance(clabel, list) and len(clabel) == len(qz) - 1:
		columns = ["z"] + ["%s" % s for s in clabel]
	else:
		columns = ["z"] + ["q%i" % i for i in range(1, len(qz))]
		sys.stderr.write("Warning (tableo.q_z): Column headings could not be determined.\n")

	if isinstance(units, str):
		units1 = ["nm"] + [units] * (len(columns) - 1)  # same unit for all data columns
	elif isinstance(units, list) and len(units) == len(columns) - 1:
		units1 = ["nm"] + units
	else:
		units1 = None  # no units row

	formats = [get_format(c, precision) for c in columns]

	write(filename, qz, formats, columns=columns, units=units1)
	return
+
def potential(filename, params, pot, clabel = None, units = None, **kwds):
	"""Table of potential values, wrapper function.

	The potential may be given per z coordinate only (1-dim input) or per z
	coordinate and orbital (2-dim input). In the latter case, orbital columns
	that are numerically identical (within 1e-9) are merged; the column labels
	carry the orbital group (6, 8, 7) and, where values differ, a spin suffix.

	Arguments:
	filename   String. The output file name.
	params     PhysParams instance. Passed through to q_z(); also provides
	           norbitals.
	pot        Array of dim 1 (z dependence only) or dim 2 (z and orbitals).
	clabel     String or None. Base label for the data columns. If None,
	           default labels 'potential...' are used.
	units      String or None. Unit for the data columns.
	**kwds     Further keyword arguments passed to q_z().

	No return value.
	"""
	pot = np.asarray(pot)
	if pot.ndim == 1:
		# Single potential column; delegate directly
		q_z(filename, params, pot, clabel = clabel, units= units, **kwds)
	elif pot.ndim == 2 and pot.shape[1] == params.norbitals:
		qty = []
		clabel1 = []
		units1 = None
		# Threshold 1e-9 decides whether orbital columns count as identical
		if np.amax(np.abs(np.diff(pot, axis = 1))) < 1e-9:
			# All orbitals identical: write a single column
			q_z(filename, params, pot[:, 0], clabel = clabel, units= units, **kwds)
		else:
			# Orbital group 6 (columns 0, 1): merged, or split by spin
			if np.amax(np.abs(pot[:, 0] - pot[:, 1])) < 1e-9:
				qty.append(pot[:, 0])
				clabel1.append("potential6" if clabel is None else clabel + '6')
			else:
				qty += [pot[:, 0], pot[:, 1]]
				clabel1 += ["potential6(+1/2)" if clabel is None else clabel + '6(+1/2)', "potential6(-1/2)" if clabel is None else clabel + '6(-1/2)']
			# Orbital group 8 (columns 2-5): fully merged, grouped ('8h'/'8l'),
			# or fully split by spin component
			if np.amax(np.abs(np.diff(pot[:, 2:6], axis = 1))) < 1e-9:
				qty.append(pot[:, 2])
				clabel1.append("potential8" if clabel is None else clabel + '8')
			else:
				if np.amax(np.abs(pot[:, 2] - pot[:, 5])) < 1e-9:
					qty.append(pot[:, 2])
					clabel1.append("potential8h" if clabel is None else clabel + '8h')
				else:
					qty.append(pot[:, 2])
					clabel1.append("potential8(+3/2)" if clabel is None else clabel + '8(+3/2)')
				if np.amax(np.abs(pot[:, 3] - pot[:, 4])) < 1e-9:
					qty.append(pot[:, 3])
					clabel1.append("potential8l" if clabel is None else clabel + '8l')
				else:
					qty += [pot[:, 3], pot[:, 4]]
					clabel1 += ["potential8(+1/2)" if clabel is None else clabel + '8(+1/2)', "potential8(-1/2)" if clabel is None else clabel + '8(-1/2)']
				# Column 5 (-3/2) is written separately only if it differs
				# from column 2 (+3/2); otherwise it was covered by '8h'
				if not (np.amax(np.abs(pot[:, 2] - pot[:, 5])) < 1e-9):
					qty.append(pot[:, 5])
					clabel1.append("potential8(-3/2)" if clabel is None else clabel + '8(-3/2)')
			# Orbital group 7 (columns 6, 7), present only for >= 8 orbitals
			if params.norbitals < 8:
				pass
			elif np.amax(np.abs(pot[:, 6] - pot[:, 7])) < 1e-9:
				qty.append(pot[:, 6])
				clabel1.append("potential7" if clabel is None else clabel + '7')
			else:
				qty += [pot[:, 6], pot[:, 7]]
				clabel1 += ["potential7(+1/2)" if clabel is None else clabel + '7(+1/2)', "potential7(-1/2)" if clabel is None else clabel + '7(-1/2)']
			units1 = None if units is None else [units for _ in qty]
		# Note: in the all-identical case, qty is [] and this call is a no-op
		# (q_z returns early for an empty list)
		q_z(filename, params, qty, clabel = clabel1, units= units1, **kwds)
	return
+
+
def transitions(filename, data, delta_e_min = 0.1):
	"""Table of optical transitions, wrapper version.
	Provide data for transitions, i.e., energies and band labels (LL index, band
	index) for the pair of states, the transition rate, and several quantities
	derived from these.

	Arguments:
	filename     String. The output file name.
	data         DiagData instance. The DiagDataPoint elements should have their
	             transitions being set, i.e., ddp.transitions is not None, but a
	             TransitionsData instance.
	delta_e_min  Float. Minimal energy difference below which transitions are
	             not written to the output file.

	No return value.
	"""
	precision = get_config_int('table_transitions_precision', minval = 0)

	if len(data) == 0:
		sys.stderr.write("Warning (tableo.transitions): No data\n")
		return
	# Count the total number of transitions over all data points
	ntr = 0
	for d in data:
		if d.transitions is not None:
			ntr += d.transitions.n
	if ntr == 0:
		sys.stderr.write("Warning (tableo.transitions): No transitions data\n")
		return
	# Leading columns: the magnetic-field (vector) components
	paramval = data.get_paramval()
	if len(paramval) > 0 and isinstance(paramval[0], Vector):
		paramcomp = paramval[0].components(prefix = 'b')
		table_clabel = list(paramcomp)
		table_units = ['' for _ in table_clabel]
	else:
		sys.stderr.write("Warning (tableo.transitions): Parameter values (magnetic field) invalid.\n")
		return
	# NOTE(review): 'is None or ... is not None' means these extra columns are
	# also enabled when any point lacks transitions data entirely; confirm this
	# is intended (as opposed to 'is not None and ...').
	occupancies = any([d.transitions is None or d.transitions.occupancy is not None for d in data])
	refractive_index = any([d.transitions is None or d.transitions.refr_index is not None for d in data])

	table_clabel.extend(['LL1', 'B1', 'E1', 'LL2', 'B2', 'E2', 'deltaE', 'freq', 'lambda', 'amplitude'])
	table_units.extend( [   '',   '','meV',    '',   '','meV',    'meV',  'THz',  '\xb5m', 'nm^2 ns^-2 meV^-1'])
	table_data = []
	if occupancies:
		table_clabel.extend(['occupancy', 'degeneracy', 'rate_density'])
		table_units.extend(['', '10^-3 nm^-2', 'mV^-2 ns^-1'])
	if refractive_index:
		table_clabel.extend(['absorption', 'absorption_delta'])
		table_units.extend(['\u2030 (10^-3)', '\u2030 (10^-3)'])

	for d in data:
		td = d.transitions
		if td is None or td.n == 0:
			continue
		# Repeat the field value once per transition at this data point
		if isinstance(d.paramval, Vector):
			kdata = np.array([d.paramval.value] * td.n).transpose()
			bz = d.paramval.z()
		else:
			kdata = d.paramval * np.ones(td.n)
			bz = d.paramval
		if isinstance(td.bval, (float, int, np.floating, np.integer)) and td.bval != bz:
			raise ValueError("Non-matching magnetic field values between DiagDataPoint and TransitionsData instances.")  # This should never happen!
		# Degeneracy column, proportional to the out-of-plane field bz
		degeneracy = (float("nan") if bz is None else (eoverhbar / 2.0 / np.pi) * bz) * np.ones(td.n)
		if d.bindex is not None:
			td.set_bindex(d.eival, d.llindex, d.bindex)  # d.llindex = None handled automatically
		# Band indices of the two states; NaN columns if not available
		b0 = float("nan") * np.ones(td.n) if td.bindex is None else td.bindex[:, 0]
		b1 = float("nan") * np.ones(td.n) if td.bindex is None else td.bindex[:, 1]
		delta_e = td.energies[:, 1] - td.energies[:, 0]
		rate_dens = td.rate_density()
		# Columns in the order of table_clabel; GHz -> THz and nm -> micrometer
		transdata1 = [td.llindex[:, 0], b0, td.energies[:, 0], td.llindex[:, 1], b1, td.energies[:, 1], delta_e, td.freq_ghz() * 1e-3, td.lambda_nm() * 1e-3, td.amplitudes]
		if occupancies:
			occ = float("nan") * np.ones(td.n) if td.occupancy is None else td.occupancy
			transdata1.extend([occ, 1e3 * degeneracy, rate_dens])
		if td.refr_index is not None:
			## transition rate density per photon
			transdata1.extend([1e3 * td.absorption(), 1e3 * td.absorption(signed = True)])
		elif refractive_index:
			## values not defined, but 'absorption' columns are present
			transdata1.extend([float("nan") * np.ones(td.n)] * 2)
		transdata = np.vstack(transdata1)
		# discard transitions with very small energy difference
		sel = (np.abs(delta_e) >= delta_e_min)
		if np.count_nonzero(sel) > 0:
			table_data.append(np.vstack([kdata, transdata])[:, sel])

	simple(filename, data = np.hstack(table_data), float_precision = (precision, 'g'), clabel = table_clabel, cunit = table_units)
	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/disp.py b/kdotpy-v1.0.0/src/kdotpy/tableo/disp.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8aafef7b7074ff60d74a74e95bf8a912e177b21
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/disp.py
@@ -0,0 +1,645 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+from ..momentum import Vector, VectorGrid
+from ..config import get_config_int, get_config_bool, get_config
+from ..observables import all_observables
+from .tools import bandlabel_to_fileid
+from .tools import format_quantity_and_unit, vector_units, float_format
+from .write import write
+
+
+### HELPER FUNCTIONS ###
+
def get_vector_components(ddp):
	"""Determine momentum column ids for a DiagDataPoint.

	Argument:
	ddp   DiagDataPoint instance. Its attribute k may be a Vector, a tuple,
	      None, or a plain scalar.

	Returns:
	List of strings. The momentum component labels, e.g. ['kx', 'ky'].
	"""
	k = ddp.k
	if isinstance(k, Vector):
		return k.components(prefix = 'k')
	if isinstance(k, tuple):
		n = len(k)
		# Use cartesian labels up to three components, numbered labels beyond
		if n <= 3:
			return ["kx", "ky", "kz"][:n]
		return [f"k{i+1}" for i in range(n)]
	if k is None:
		return []
	return ["k"]
+
def get_paramval_components(paramval, paramstr):
	"""Extract parameter value components from paramval and paramstr.

	See get_tabledispersion_quantities() for more information.

	Returns:
	List of strings. The component labels, or an empty list if paramval is
	None, empty, or not Vector valued.
	"""
	if paramval is None or len(paramval) == 0:
		return []
	first = paramval[0]
	if isinstance(first, Vector):
		return first.components(prefix=paramstr)
	return []
+
def get_tabledispersion_quantities(data, observables = None, paramval = None, paramstr =''):
	"""Get quantities (column ids) for dispersion table.

	Arguments:
	data         DiagData instance. The data.
	observables  List of strings or None. The columns containing observables.
	paramval     List of Vector instances or None. If set, add columns
	             corresponding to the parameter value.
	paramstr     String. The parameter, i.e., the prefix of the Vector instances
	             in paramval. Typically, this is 'b' for magnetic field.

	Returns:
	List of strings. All column headers.
	"""
	base = data.get_base_point()  # is base point if zero point is undefined
	cols = get_vector_components(base)
	cols.extend(get_paramval_components(paramval, paramstr))
	cols.append("E")
	# Optional index/character columns, depending on what the base point holds
	if base.llindex is not None:
		cols.append('llindex')
	if base.bindex is not None:
		cols.append('bindex')
	if base is not None and base.char is not None:
		cols.append('char')

	# Observable columns: prefer the ids stored in the data itself
	if base is not None and base.obsids is not None:
		cols.extend(base.obsids)
	elif observables is not None and base.obsvals is not None:
		cols.extend(observables)

	## Check for duplicates and raise warning
	if len(cols) != len(set(cols)):
		sys.stderr.write("Warning (get_tabledispersion_quantities): Duplicate quantities (column ids). This may cause errors in further processing.\n")
	return cols
+
def tabledispersion_format_columns_units(quantities, style=None, unit_style=None, degrees=True):
	"""Format column headers and units for dispersion table.

	Iterate format_quantity_and_unit() over all quantities. Styles that are
	not given explicitly are read from the configuration.

	Returns:
	columns  List of strings. Formatted column headers.
	units    List of strings. Formatted units.
	"""
	style_choices = ['raw', 'plain', 'unicode', 'tex']
	if style is None:
		style = get_config('table_dispersion_obs_style', choices=style_choices)
	if unit_style is None:
		unit_style = get_config('table_dispersion_unit_style', choices=style_choices)
	dimful = all_observables.dimful is True
	pairs = [
		format_quantity_and_unit(q, style=style, unit_style=unit_style, dimful=dimful, degrees=degrees)
		for q in quantities
	]
	columns = [p[0] for p in pairs]
	units = [p[1] for p in pairs]
	return columns, units
+
def tabledispersion_bandlabel_columns(data, bandlabels = None, to_str = True):
	"""Headers for 'band columns'.
	The headers are band labels or band label + character.

	Arguments:
	data        DiagData instance.
	bandlabels  List of integers, 2-tuples, or strings or None. The band labels
	            and/or characters. If None, extract band indices from data. The
	            configuration setting 'csv_multi_index' determines how tuples
	            are written to the file.
	to_str      True or False. If True, the return values are lists of strings.
	            If False, the return values are the same object type as the band
	            labels and/or characters (integers, tuples, or strings).

	Returns:
	colheadings1  List of strings (to_str is True) or generic type (to_str is
	              False). The first row of column headings.
	colheadings2  List of strings (to_str is True) or generic type (to_str is
	              False), or None. If set, the second row of column headings.
	              None means there is no second row.
	"""
	if bandlabels is None:
		bandlabels = data.get_all_bindex()

	# Config determines how (llindex, bindex) tuples are rendered as headings
	multi_index_fmt = get_config("csv_multi_index", choices = ['tuple', 'short', 'split', 'tworow', 'llindex', 'bindex']).lower()

	if any([isinstance(b, tuple) for b in bandlabels]):
		colheadings2 = None
		if multi_index_fmt == 'tuple':
			# Full tuple as a single string, e.g. '(1, 2)'
			colheadings1 = [str(b) for b in bandlabels]
		elif multi_index_fmt == 'bindex':
			# Keep only the second tuple element (band index)
			colheadings1 = ["%s" % (b[1] if isinstance(b, tuple) else b) for b in bandlabels]
		elif multi_index_fmt == 'llindex':
			# Keep only the first tuple element (LL index); blank for non-tuples
			colheadings1 = ["%s" % b[0] if isinstance(b, tuple) else "" for b in bandlabels]
		elif multi_index_fmt == 'short':
			# Comma-separated, e.g. '1,2'
			colheadings1 = ["%s,%s" % b if isinstance(b, tuple) else "%s" % b for b in bandlabels]
		elif multi_index_fmt in ['split', 'tworow']:
			# Two heading rows: first tuple elements on the first row,
			# second tuple elements on the second row
			colheadings1 = [b[0] if isinstance(b, tuple) else b for b in bandlabels]
			colheadings2 = [b[1] if isinstance(b, tuple) else b for b in bandlabels]
		else:
			raise ValueError("Invalid value for multi_index_fmt")
	else:
		# Plain band labels; add band characters as a second row if available
		colheadings1 = bandlabels
		data_k0 = data.get_base_point()
		char = None if data_k0 is None or data_k0.char is None else [data_k0.get_char((b,)) for b in bandlabels]
		colheadings2 = None if char is None else ["" if ch is None else ch for ch in char]

	if to_str:
		colheadings1 = [str(c) for c in colheadings1]
		if colheadings2 is not None:
			colheadings2 = [str(c) for c in colheadings2]

	return colheadings1, colheadings2
+
def tabledispersion1d_columns(comp, degrees = True, observable = None, n = 1):
	"""Get column headers (observables and units) for the 'byband' data files.

	Arguments:
	comp        List or array. The vector components (momentum or magnetic
	            field).
	degrees     True or False. Determines the unit for the angular vector
	            components.
	observable  String, 2-tuple, or None. The observable that the data
	            represents. A 2-tuple is taken as (label, unit) directly;
	            None implies energy.
	n           Integer. The number of times the observable column is repeated.

	Returns:
	headings  List of strings. The column headings with the observable labels,
	          including vector components.
	units     List of strings. The column headings with the units.
	"""
	if not isinstance(comp, (list, tuple, np.ndarray)):
		raise TypeError("Argument comp must be a list, tuple, or array")

	comp_units = vector_units(comp, degrees = degrees)
	# Observable label and unit: either given explicitly as a pair, or derived
	if isinstance(observable, tuple) and len(observable) == 2:
		obs_label, obs_unit = observable
	else:
		obs_label, obs_unit = format_quantity_and_unit(observable)

	headings = list(comp)
	headings.extend([obs_label] * n)
	units = list(comp_units)
	units.extend([obs_unit] * n)
	return headings, units
+
def get_format(q, float_precision = 5, degrees = True):
	"""Get data formats for dispersion table.

	We use % style formatting rather than {} style formatting because the
	former is about 30% faster. For dispersion tables, we use this custom
	version instead of tableo.tools.get_format().

	Arguments:
	q                String. Quantity (column id).
	float_precision  Integer. Number of digits for floating point numbers.
	degrees          True or False. Whether to express angles in degrees (True)
	                 or radians (False).

	Returns:
	The format string, to be used as fmt % value.
	"""
	# Energies and derivatives: two fewer significant digits
	if q is None or q in ['e', 'E'] or q.startswith('dedk'):
		return float_format(float_precision, delta = -2)
	# Integer-valued index columns
	if q.endswith('index'):
		return '%i'
	# Band character is a plain string
	if q == 'char':
		return '%s'
	# Angles in degrees: also two fewer significant digits
	if degrees and q in ['kphi', 'bphi', 'ktheta', 'btheta']:
		return float_format(float_precision, delta = -2)
	return float_format(float_precision)
+
def get_tabledispersion_formats(quantities, **kwds):
	"""Apply get_format() to each quantity; keyword arguments are passed on."""
	formats = []
	for q in quantities:
		formats.append(get_format(q, **kwds))
	return formats
+
+### DISPERSION ###
+
def disp(filename, data, params = None, observables = None, sort = True, erange = None, dependence = None):
	"""Write 'flat' dispersion table.

	Arguments:
	filename         String. The output file name.
	data             DiagData instance. The data.
	params           PhysParams instance. (Placeholder; currently unused.)
	observables      List of strings or None. The columns containing
	                 observables.
	sort             True or False. Whether to sort the data at each data point
	                 by eigenvalue.
	erange           2-tuple or None. If set, write only the data for states
	                 with energies in this range.
	dependence       List of length 2 or 3, or None. If set, the list should be
	                 of the form [paramval, paramstr, paramunit (optional)],
	                 where paramval is an array of parameter values (typically
	                 magnetic field), paramstr is its label (typically 'b' for
	                 magnetic field), and paramunit the unit (typically 'T' for
	                 tesla, in case of magnetic field; optional). If None,
	                 assume a dispersion (momentum dependence) as opposed to
	                 parameter dependence.

	No return value.
	"""
	if data is None or len(data) == 0:
		sys.stderr.write("Warning (tableo.disp): No data to be written.\n")
		return

	## Filter data by energy range
	if isinstance(erange, (tuple, list)) and len(erange) >= 2:
		data_sel = data.select_eival(tuple(erange[:2]))
	elif erange is None:
		data_sel = data
	else:
		raise TypeError("Argument erange must be None or a list or tuple of 2 elements")
	if len(data_sel) == 0:
		sys.stderr.write("Warning (tableo.disp): No data within energy range.\n")
		return

	float_precision = get_config_int('table_dispersion_precision', minval = 2)
	if float_precision < 3:
		sys.stderr.write("Warning (tableo.disp): Precision (option 'table_dispersion_precision') must be at least 2, ideally >= 3.\n")

	# Unpack dependence; its first element must match the filtered data length
	if isinstance(dependence, (list, tuple)) and len(dependence) in [2, 3] and len(dependence[0]) == len(data_sel):
		paramval, paramstr = dependence[0], dependence[1]
	elif dependence is None:
		paramval, paramstr = None, None
	else:
		sys.stderr.write("ERROR (tableo.disp): Combination of data and dependence is invalid. No data written.\n")
		return

	# Column ids, per-column formats, and formatted headers (label + unit)
	quantities = get_tabledispersion_quantities(data_sel, observables = observables, paramval = paramval, paramstr = paramstr)
	formats = get_tabledispersion_formats(quantities, float_precision = float_precision, degrees = data.get_degrees(True))
	formatted_columns, formatted_units = tabledispersion_format_columns_units(quantities, degrees = data.get_degrees(True))  # formatted column headers

	# Extract table data
	disp_data = data_sel.get_values_dict(quantities, sort=sort)

	# Write file
	write(filename, disp_data, formats, columns=formatted_columns, units=formatted_units)
	# TODO: Slightly worse performance than previous version with table_disp_row()
	return
+
+### DISPERSION BY BAND ###
+
def tabledispersion1d(filename, data, bandlabels = None, observable = None, transform = None, float_precision = 5):
	"""Write one-dimensional dispersion table with bands as columns, non-pandas version.

	Arguments:
	filename         String. The output file name.
	data             DiagData instance. The data.
	bandlabels       List of {integers, 2-tuples, strings} or None. If set, use
	                 these band labels as column headers. If None, extract the
	                 band indices from data.
	observable       String or None. If set, then the written data are the
	                 values of that observable. If None, write the energies.
	transform        Object or None. If set, it is applied to the band data
	                 before writing (via transform.apply()); its attributes
	                 qstr and ustr provide the column label and unit.
	                 (NOTE(review): the transform interface is not defined in
	                 this file -- confirm against the caller.)
	float_precision  Integer. Number of digits for floating point numbers.

	No return value.
	"""
	if len(data.shape) != 1:
		sys.stderr.write("ERROR (tabledispersion1d): Not a 1D grid\n")
		return
	if bandlabels is None:
		bandlabels = data.get_all_bindex()

	# Momentum values for the leading column(s)
	kgrid = data.grid
	if kgrid is not None:
		comp = kgrid.get_components(include_prefix = True)
		kval = np.array([kgrid.get_values(c) for c in comp]).transpose()
	elif isinstance(data[0].k, Vector):
		# No grid: fall back to vector magnitudes, single column 'k'
		kval = np.array([d.k.len() for d in data])
		comp = ['k']
	else:
		raise TypeError

	# Compose data
	tabledata = []
	for b in bandlabels:
		kdata, zdata = data.get_plot_coord(b, "index")
		if np.all(np.isnan(zdata)):
			continue  # skip bands without any data
		if transform is not None:
			densdata = transform.apply(zdata, kdata.get_array()[0] if isinstance(kdata, VectorGrid) else kdata)
			tabledata.append(densdata)
		elif observable is None:
			tabledata.append(zdata)
		else:
			tabledata.append(np.real(data.get_observable(observable, b, "index")))
	extdata = np.vstack((kval.T, *tabledata))

	# Determine formats
	k_fmt_all = [get_format(co, float_precision, degrees = data.get_degrees(True)) for co in comp]
	float_fmt = get_format(observable, float_precision)
	formats = k_fmt_all + [float_fmt] * len(tabledata)

	# Column headers (quantity and units)
	degrees = data.get_degrees(True)
	obs = (transform.qstr, transform.ustr) if transform is not None else observable
	obsheadings, unitheadings = tabledispersion1d_columns(comp, degrees=degrees, observable=obs, n=len(tabledata))
	columns = ['%s' % h for h in obsheadings]
	units = ['%s' % h for h in unitheadings] if get_config_bool('table_dispersion_units') else None

	# Band labels (written to file separately as extra headings)
	bandheadings1, bandheadings2 = tabledispersion_bandlabel_columns(data, bandlabels)
	blanks = ["" for _ in comp]
	bandheadings = blanks + bandheadings1 if bandheadings2 is None else [blanks + bandheadings1, blanks + bandheadings2]

	# Write file
	write(filename, extdata, formats, columns=columns, units=units, extraheader=bandheadings)
	return
+
+def tabledispersion2d(filename, data, bandlabel, observable = None, float_precision = 5):
+	"""Write two-dimensional dispersion table for a single band, non-pandas version.
+
+	Note:
+	This function writes one file for a single band. In order to write data for
+	multiple bands, call this function repeatedly over these bands.
+
+	Arguments:
+	filename         String. The output file name.
+	data             DiagData instance. The data.
+	bandlabel        Integers or 2-tuples. The band label, either band index or
+	                 (LL index, band index) for which this file should be
+	                 written.
+	observable       String or None. If set, then the written data are the
+	                 values of that observable. If None, write the energies.
+	float_precision  Integer. Number of digits for floating point numbers.
+
+	No return value.
+	"""
+	kdata, zdata = data.get_plot_coord(bandlabel, "index2d")
+	if np.all(np.isnan(zdata)):
+		return
+
+	kdata, zdata = data.get_plot_coord(bandlabel, "index2d")
+	if np.all(np.isnan(zdata)):
+		return
+	if len(zdata.shape) != 2:
+		sys.stderr.write("ERROR (tabledispersion2d): Not a 2D grid\n")
+		return
+
+	# Data columns (ky values, energy/observable data)
+	kgrid = data.grid
+	if kgrid is not None:
+		kxval, kyval = kgrid.get_array()
+		_, comp, _, _ = kgrid.get_var_const()
+	elif isinstance(kdata[0][0], Vector):
+		kxval = np.array([kk[0].to_tuple()[0] for kk in kdata])
+		kyval = np.array([k.to_tuple()[1] for k in kdata[0]])
+		comp = ['kx', 'ky']
+	else:
+		raise TypeError
+	if observable is None:
+		plotdata = zdata
+	else:
+		plotdata = np.real(data.get_observable(observable, bandlabel, "index2d"))
+
+	# Determine formats
+	k_fmt, k2_fmt = [get_format(co, float_precision, degrees=data.get_degrees(True)) for co in comp]
+	float_fmt = get_format(observable, float_precision)
+	formats = [float_fmt] * len(kxval)
+
+	# First row: Band or character label and kx values
+	data_k0 = data.get_base_point()
+	lb = bandlabel if isinstance(bandlabel, tuple) else (bandlabel,)
+	char = None if data_k0 is None or data_k0.char is None else data_k0.get_char(lb)
+	labeltxt = "" if char is None else char
+	columns = [k_fmt % kx for kx in kxval]
+	index = [k2_fmt % ky for ky in kyval]
+
+
+	# Data labels and axis labels
+	if get_config_bool('table_dispersion_data_label'):
+		datalabel, dataunit = format_quantity_and_unit(observable)
+	else:
+		datalabel, dataunit = None, None
+	axislabels = list(comp)
+	if get_config_bool('table_dispersion_units'):
+		vunits = vector_units(comp, degrees=data.get_degrees(True))
+		axisunits = list(vunits)
+	else:
+		axisunits, dataunit = None, None
+
+	# Write file
+	write(
+		filename, plotdata, formats, columns=columns, index=index,
+		label_text=labeltxt, axislabels=axislabels, axisunits=axisunits,
+		datalabel=datalabel, dataunit=dataunit
+	)
+	return
+
+def tabledispersion3d(filename, data, bandlabel, observable = None, float_precision = 5):
+	"""Write 'three-dimensional' dispersion table for a single band, non-pandas version.
+	The second and third dimension are in the first and second columns.
+
+	Note:
+	This function writes one file for a single band. In order to write data for
+	multiple bands, call this function repeatedly over these bands.
+
+	Arguments:
+	filename         String. The output file name.
+	data             DiagData instance. The data. Must have a VectorGrid.
+	bandlabel        Integers or 2-tuples. The band label, either band index or
+	                 (LL index, band index) for which this file should be
+	                 written.
+	observable       String or None. If set, then the written data are the
+	                 values of that observable. If None, write the energies.
+	float_precision  Integer. Number of digits for floating point numbers.
+
+	No return value.
+	"""
+	kdata, fdata = data.get_plot_coord(bandlabel, "index")
+	# Band without any numerical data: write nothing
+	if np.all(np.isnan(fdata)):
+		return
+
+	# Unlike the 1d and 2d variants, a VectorGrid is strictly required here
+	kgrid = data.grid
+	if kgrid is not None:
+		kxval, kyval, kzval = kgrid.get_array()
+		_, comp, _, _ = kgrid.get_var_const()
+	else:
+		raise TypeError("Data must have a VectorGrid instance")
+
+	if observable is not None:
+		fdata = np.real(data.get_observable(observable, bandlabel, "index"))
+
+	# Compose data: fold the 3d array into 2d with kx as first axis. The
+	# transpose (0, 2, 1) swaps the y and z axes, so that after the reshape
+	# each row runs over all y for fixed z (z varies slower than y). The
+	# meshgrid with indexing='xy' below produces (len(kz), len(ky)) arrays
+	# whose flattened order matches exactly that row layout.
+	data3d = fdata.reshape((len(kxval), len(kyval), len(kzval)))
+	data2d = data3d.transpose((0, 2, 1)).reshape((len(kxval), len(kyval) * len(kzval)))
+	kyval_g, kzval_g = np.meshgrid(kyval, kzval, indexing='xy')
+
+	# Determine formats; one format per momentum component
+	k_fmt, k2_fmt, k3_fmt = [get_format(co, float_precision, degrees = data.get_degrees(True)) for co in comp]
+	float_fmt = get_format(observable, float_precision)
+	formats = [float_fmt] * len(kxval)
+
+	# First row: Band or character label and kx values
+	data_k0 = data.get_base_point()
+	lb = bandlabel if isinstance(bandlabel, tuple) else (bandlabel,)
+	char = None if data_k0 is None or data_k0.char is None else data_k0.get_char(lb)
+	labeltxt = "" if char is None else char
+	columns = [k_fmt % kx for kx in kxval]
+	# Two index columns: kz first, then ky (matches the reversed axis order)
+	index = [[k3_fmt % kz for kz in kzval_g.flatten()], [k2_fmt % ky for ky in kyval_g.flatten()]]
+
+	# Data labels and axis labels
+	if get_config_bool('table_dispersion_data_label'):
+		datalabel, dataunit = format_quantity_and_unit(observable)
+	else:
+		datalabel, dataunit = None, None
+	axislabels = [comp[0], comp[2], comp[1]]  # Reverse 2nd and 3rd component
+	if get_config_bool('table_dispersion_units'):
+		vunits = vector_units(comp, degrees=data.get_degrees(True))
+		axisunits = [vunits[0], vunits[2], vunits[1]]  # Reverse 2nd and 3rd component
+	else:
+		axisunits, dataunit = None, None
+
+	# Write file
+	write(
+		filename, data2d, formats, columns=columns, index=index,
+		label_text=labeltxt, axislabels=axislabels, axisunits=axisunits,
+		datalabel=datalabel, dataunit=dataunit
+	)
+	return
+
+def tabledispersion_ndim(dim):
+	"""Selector function for tabledispersion{1d, 2d, 3d}
+
+	Arguments:
+	dim       1, 2, or 3. Dimension of the data grid.
+
+	Returns:
+	fn        Function. That is tabledispersion{1d, 2d, 3d}.
+	"""
+	if dim == 1:
+		return tabledispersion1d
+	elif dim == 2:
+		return tabledispersion2d
+	elif dim == 3:
+		return tabledispersion3d
+	else:
+		raise ValueError("Invalid value for dim")
+
+def disp_byband(
+		filename, data = None, params = None, observable = None, erange = None,
+		dependence = None, transform = None):
+	"""Dispersion table (by band) in csv or similar tabular format. Wrapper function.
+	For a one dimensional dispersion/dependence, this will provide a single file
+	with bands as columns. For two or three dimensions, this will provide a
+	separate file for each band.
+
+	Arguments:
+	filename          String. The output file name.
+	data              DiagData. The data as obtained from diagonalization.
+	params            PhysParams instance. (Placeholder)
+	observable        String or None. If set, also write a second file (or
+	                  file per band) with the values of that observable, in
+	                  addition to the energy file.
+	erange            2-tuple or None. If set, write only the data for states
+	                  with energies in this range.
+	dependence        (Placeholder)
+	transform         An ETransform instance. This may be used to change the
+	                  vertical axis to a different quantity that has a
+	                  one-to-one relation to energy, for example integrated
+	                  DOS. Only applied for one-dimensional data.
+
+	No return value.
+	"""
+	observable_warning_issued = False
+
+	if data is None:
+		sys.stderr.write("Warning (tableo.disp_byband): No data to be written.\n")
+		return
+
+	data_labels, plot_mode = data.get_data_labels(by_index = True)
+	dim = len(data.shape) if data.grid is None else len(data.grid.shape)
+	if dim not in [1, 2, 3]:
+		sys.stderr.write("Warning (tableo.disp_byband): Invalid dimension.\n")
+		return
+
+	float_precision = get_config_int('table_dispersion_precision', minval = 2)
+	if float_precision < 3:
+		sys.stderr.write("Warning (tableo.disp_byband): Precision (option 'table_dispersion_precision') must be at least 2, ideally >= 3.\n")
+
+	# Interpret the energy range; emin/emax of None means unbounded
+	if isinstance(erange, (tuple, list)) and len(erange) >= 2:
+		emin, emax = erange[:2]
+	elif erange is None:
+		emin, emax = None, None
+	else:
+		raise TypeError("Argument erange must be None or a list or tuple of 2 elements")
+
+	if "verbose" in sys.argv:
+		print("Writing %i data series; 'plot mode' %s" % (0 if data_labels is None else len(data_labels), plot_mode))
+	if plot_mode != "index":
+		sys.stderr.write("Warning (tableo.disp_byband): Plot mode 'index' is required, but not available\n")
+		return
+	if data_labels is None or data_labels == []:
+		sys.stderr.write("Warning (tableo.disp_byband): Data labels are required, but are not available.\n")
+		return
+	# Split file name into prefix and extension for composing output names
+	fprefix = ".".join(filename.split(".")[:-1])
+	fextension = filename.split(".")[-1]
+	obsids = data[0].obsids
+
+	# 1d: single file with bands as columns (plus one observable file)
+	if dim == 1:
+		fname = fprefix + ".byband." + fextension
+		tabledispersion_ndim(1)(fname, data, bandlabels = None, observable = None, float_precision = float_precision, transform = transform)
+		if transform is not None and observable is not None:
+			sys.stderr.write("Warning (tableo.disp_byband): Argument observable is ignored if transform is not None.\n")
+		elif observable is not None and obsids is not None and observable in obsids:
+			fname = fprefix + "." + observable + ".byband." + fextension
+			tabledispersion_ndim(1)(fname, data, bandlabels = None, observable = observable, float_precision = float_precision)
+		return
+
+	if transform is not None:
+		sys.stderr.write("Warning (tableo.disp_byband): Argument transform is ignored for 2 and 3 dimensions.\n")
+
+	# 2d/3d: one file per band (plus one observable file per band)
+	for lb in data_labels:
+		kdata, zdata = data.get_plot_coord(lb, "index2d" if dim == 2 else "index")
+		if np.all(np.isnan(zdata)):
+			continue
+		zmin, zmax = np.nanmin(zdata), np.nanmax(zdata)
+
+		# do not plot bands that lie completely outside the energy range
+		if emin is not None and zmax < emin:
+			continue
+		if emax is not None and zmin > emax:
+			continue
+		fname = fprefix + "." + bandlabel_to_fileid(lb) + "." + fextension
+
+		tabledispersion_ndim(dim)(fname, data, lb, observable = None, float_precision = float_precision)
+
+		if observable is not None and obsids is not None:
+			if observable in obsids:
+				fname = fprefix + "." + observable + "." + bandlabel_to_fileid(lb) + "." + fextension
+				tabledispersion_ndim(dim)(fname, data, lb, observable = observable, float_precision = float_precision)
+			elif not observable_warning_issued:
+				sys.stderr.write("Warning (tableo.disp_byband): Observable '%s' not available or unsuitable for 2D table output.\n" % observable)
+				observable_warning_issued = True  # prevent warning from being shown many times
+
+	return
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/dos.py b/kdotpy-v1.0.0/src/kdotpy/tableo/dos.py
new file mode 100644
index 0000000000000000000000000000000000000000..136252dc687b34de157b5f7396410c1434cb3584
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/dos.py
@@ -0,0 +1,345 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+
+import numpy as np
+
+from ..config import get_config_bool, get_config, get_config_int
+from ..density import DensityScale
+from ..momentum import VectorGrid, Vector
+from ..phystext import format_unit
+from .tools import get_precision, get_label_unit_style
+from .simple import simple2d, simple
+
+
+def local_density(params, densitydata, integrated = False, filename = None, clabel = None):
+	"""Table of local (integrated) density of states, wrapper version for tableo.simple2d.
+
+	Arguments:
+	params       NOT USED (to make call signature identical to
+	             ploto.local_density() and perhaps for future use)
+	densitydata  DensityData instance
+	integrated   True or False. Whether to output IDOS or DOS.
+	filename     String. The output file.
+	clabel       String or None. Format string for the first-column header,
+	             where {x} is replaced by k or B (or the appropriate component)
+	             and {y} by E. If None, use 'IDOS({x}, E)' or 'DOS({x}, E)'.
+
+	No return value
+	"""
+	# Gather formatting options from the configuration
+	precision = get_precision('table_dos_precision')
+	scaled = get_config_bool('table_dos_scaling')
+	negexp = get_config_bool('table_dos_units_negexp')
+	label_style, unit_style = get_label_unit_style()
+	# Select IDOS or DOS data and the default column label
+	if integrated:
+		xval, ee, dens = densitydata.xyz_idos(scaled = scaled)
+		if clabel is None:
+			clabel = 'IDOS({x}, E)'  # {x} to be replaced later by k or B
+	else:
+		xval, ee, dens = densitydata.xyz_dos(scaled = scaled)
+		if clabel is None:
+			clabel = 'DOS({x}, E)'  # {x} to be replaced later by k or B
+	if dens is None:
+		sys.stderr.write("Warning (tableo.local_density): " + ("IDOS" if integrated else "DOS") + " is not defined.\n")
+		return None
+	datalabel = densitydata.qstr(style = label_style, scaled = scaled, integrated = integrated)
+	dataunit = densitydata.unitstr(
+		style = unit_style, scaled = scaled, integrated = integrated, negexp = negexp)
+
+	# Derive x-axis label and unit from the grid component: momentum (k...)
+	# in nm^-1, magnetic field (b...) in T, angular components in deg/rad
+	if isinstance(xval, VectorGrid):
+		degrees = xval.degrees  # extract value before we replace xval
+		xval, xvar, _, _ = xval.get_var_const()
+		if xvar.startswith('b'):
+			xstr = 'B'
+			if len(xvar) > 1:
+				xstr += xvar[1:]
+		else:
+			xstr = xvar
+		xunit = 'nm^-1' if xvar.startswith('k') else 'T' if xvar.startswith('b') else ''
+		if xvar.endswith('phi') or xvar.endswith('theta'):
+			xunit = 'deg' if degrees else 'rad'
+	else:
+		# Without a VectorGrid, fall back to B (Landau-level mode) or k
+		xstr = 'B' if densitydata.ll else 'k'
+		xunit = 'T' if densitydata.ll else 'nm^-1'
+
+	# TODO: Apply style
+	clabel = clabel.format(x = xstr, y = 'E')
+	simple2d(
+		filename, xval, ee, dens, float_precision = precision,
+		clabel = clabel, axislabels = [xstr, "E"], axisunits = [xunit, "meV"],
+		datalabel = datalabel, dataunit = dataunit)
+	return
+
+
+def dos_idos(params, densitydata, outputid, precision = None):
+	"""Table of density of states, wrapper version."""
+	dosdim = densitydata.kdim
+	idos = densitydata.get_idos()
+	dos = densitydata.get_dos()
+	ee = densitydata.ee
+
+	dens_qty = get_config('dos_quantity')
+	dens_unit = get_config('dos_unit')
+	unit_style = get_config('table_data_unit_style', choices=['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	if precision is None:
+		precision = get_config_int('table_dos_precision', minval = 0)
+
+	if dens_qty is not None and dens_unit == 'cm':
+		idos_unit = format_unit(7 * dosdim, ('cm', -dosdim), style=unit_style)
+		dos_unit = format_unit(7 * dosdim, ('cm', -dosdim), ('meV', -1), style=unit_style)
+	else:
+		idos_unit = format_unit(('nm', -dosdim), style=unit_style)
+		dos_unit = format_unit(('nm', -dosdim), ('meV', -1), style=unit_style)
+	dtable_label = ['E', 'IDOS_k', 'DOS_k', 'n', 'dn/dE']
+	dtable_units = ['meV', idos_unit, dos_unit, idos_unit, dos_unit]
+	if idos is None:
+		idos = np.full_like(ee, np.nan)
+	if dos is None:
+		dos = np.full_like(ee, np.nan)
+	idos_k = idos * (2 * np.pi) ** dosdim
+	dos_k = dos * (2 * np.pi) ** dosdim
+	dtable_data = [ee, idos_k, dos_k, idos, dos]
+	simple(
+		"dos%s.csv" % outputid, data=dtable_data, float_precision=(precision, 'g'),
+		clabel=dtable_label, cunit=dtable_units)
+	return ee, idos
+
+
+def dos_byband(filename, densitydata, integrated = False, showtotal = False, precision = None):
+	"""Table of density of states by band, wrapper version.
+	The first column is energy. All subsequent data columns represent the
+	contribution of each band to the (integrated) density of states.
+
+	Arguments:
+	filename      String. The output file name.
+	densitydata   DensityDataByBand instance.
+	kdim          1, 2, or 3. Number of momentum dimensions.
+	integrated    True or False. If True, write the integrated density of
+	              states. If False, write the (non-integrated) density of
+	              states.
+	showtotal     True or False. If True, show an extra column with the sum of
+	              the (integrated) density of states of all calculated bands.
+	precision     Integer or None. Number of digits for floating point numbers.
+	              If None, use the configuration setting 'table_dos_precision'.
+
+	No return value.
+	"""
+	if densitydata is None:
+		sys.stderr.write("Warning (tableo.dos_byband): No data.\n")
+		return
+	if densitydata.kdim not in [1, 2, 3]:
+		raise ValueError("Argument kdim must be 1, 2, or 3.")
+	dens_total = densitydata.get_idos() if integrated else densitydata.get_dos()
+	dens_b = densitydata.get_idos_dict() if integrated else densitydata.get_dos_dict()
+	ee = densitydata.ee
+	if dens_total is None or dens_b is None:
+		sys.stderr.write("Warning (tableo.dos_byband): No data.\n")
+		return
+	if precision is None:
+		precision = get_config_int('table_dos_precision', minval = 0)
+	unit_style = get_config('table_data_unit_style', choices = ['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	qstr = densitydata.qstr(style = unit_style, integrated = integrated)
+	unitstr = densitydata.unitstr(style = unit_style, integrated = integrated)
+
+	# Build table data column by column
+	table_data = [ee]
+	table_clabel = ['E']
+	table_units = ['meV']
+	nonzero_dos_by_band = 0
+	for b in sorted(dens_b):
+		if np.amax(np.abs(dens_b[b])) >= 1e-10:  # skip states out of range
+			band_data = dens_b[b] if integrated else np.gradient(dens_b[b]) / np.gradient(ee)
+			table_data.append(densitydata.scaledvalues(band_data))
+			table_clabel.append(qstr + ('(%i)' % b))
+			table_units.append(unitstr)
+			nonzero_dos_by_band += 1
+	if nonzero_dos_by_band == 0:
+		sys.stderr.write("Warning (write_table_dos_by_band): No data.\n")
+		return
+	if showtotal:
+		sum_data = dens_total if integrated else np.gradient(dens_total) / np.gradient(ee)
+		table_data.append(densitydata.scaledvalues(sum_data))
+		table_clabel.append(qstr + '(total)')
+		table_units.append(unitstr)
+	simple(filename, data = table_data, float_precision = (precision, 'g'), clabel = table_clabel, cunit = table_units)
+	return
+
+
+def energy_at_density(filename, bval, densval, ee_at_idos, float_precision = 5, clabel = "E(B, n)"):
+	"""Table of (Fermi) energy at density, wrapper version.
+	The result is a two dimensional array of (Fermi) energy as function of
+	magnetic field and (carrier) density.
+
+	Arguments:
+	filename         String. The output file name.
+	bval             Array of dimension one. Magnetic field values B. Elements
+	                 may be Vector instances; then their z components are used.
+	densval          Array of dimension one. Density values n at which this
+	                 quantity has been evaluated.
+	ee_at_idos       Array of dimension two. The data: Energies as function of
+	                 magnetic field B and density n.
+	float_precision  Integer or None. Number of digits for floating point
+	                 numbers. If None, use the default in tableo.simple2d().
+	clabel           String. Label for the data, written to the table header.
+
+	No return value.
+	"""
+	# NOTE(review): the number of B points is taken from axis 1 of ee_at_idos,
+	# which implies the array is oriented (n, B) — confirm against callers.
+	nb_idos = np.asarray(ee_at_idos).shape[1]
+	# Reduce Vector-valued magnetic field values to their z components
+	xval0 = np.array([b.z() if isinstance(b, Vector) else b for b in bval])
+	# If ee_at_idos was evaluated on a subdivided B grid (an integer number of
+	# subintervals per original interval), reconstruct the subdivided B values
+	# by linear interpolation between consecutive entries of bval.
+	if nb_idos > len(bval) and (nb_idos - 1) % (len(xval0) - 1) == 0:
+		subdiv = (nb_idos - 1) // (len(xval0) - 1)
+		xval1 = np.array([(1. - j / subdiv) * xval0[:-1] + (j / subdiv) * xval0[1:] for j in range(0, subdiv)])
+		xval1 = np.concatenate((np.hstack(xval1.transpose()), xval0[-1:]), axis=0)
+	else:
+		xval1 = xval0
+
+	# Density axis labels and units follow the configured density scale
+	unit_style = get_config('table_data_unit_style', choices = ['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	dens_qty = get_config('dos_quantity')
+	dens_unit = get_config('dos_unit')
+	dscale = DensityScale(np.asarray(densval), dens_qty, dens_unit, kdim = 2, ll = True)
+	qstr = dscale.qstr(style = unit_style, integrated = True)
+	unitstr = dscale.unitstr(style = unit_style, integrated = True)
+	simple2d(filename, xval1, dscale.scaledvalues(), np.asarray(ee_at_idos), float_precision = float_precision, clabel = clabel, axislabels = ["B", qstr], axisunits = ["T", unitstr])
+	return
+
+def densityz(params, densz, filename = "", **kwds):
+	"""Output density as function of z
+
+	This function dispatches the work to densityz_1d() and densityz_multiple().
+
+	Arguments:
+	params    PhysParams instance. Used to extract the array of z values.
+	densz     dict instance. Typically, it contains the keys 'total', 'e', 'h',
+	          and/or 'bg'. The values must be arrays of dimension 1 or 2.
+	filename  Output filename
+	**kwds    Keyword arguments passed to densityz_1d() or densityz_multiple().
+	"""
+	zval = params.zvalues_nm()
+
+	# Filter data and check dimensions
+	data = {q: value for q, value in densz.items() if isinstance(value, np.ndarray)}
+	if len(data) == 0:
+		sys.stderr.write("ERROR (tableo.densityz): No data.\n")
+		return
+	data_dim = [v.ndim for v in data.values() if isinstance(v, np.ndarray)]
+	if not all(dim in [1, 2] for dim in data_dim):
+		sys.stderr.write(f"ERROR (tableo.densityz): Data must consist of 1- or 2-dimensional arrays.\n")
+		return
+
+	if all(dim == 1 for dim in data_dim):
+		# Note: densityz_1d() currently does not take keyword arguments.
+		densityz_1d(filename, zval, data)
+	else:  # any(dim == 2 for dim in data_dim)
+		densityz_multiple(filename, zval, data, **kwds)
+	return
+
+def densityz_1d(filename, zval, data):
+	"""Table for density as function of z by column
+
+	The columns are the z values followed by the values in data.
+
+	Arguments:
+	filename  Output filename
+	zval      Numpy array of dim 1. The z values.
+	data      dict instance. The values must be arrays of equal length to zval.
+	"""
+
+	unit_style = get_config('table_data_unit_style', choices=['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	precision = (8, 'g')  # TODO
+	dens_lunit = get_config('dos_unit')
+	dens_exp = 27 if dens_lunit == 'm' else 21 if dens_lunit == 'cm' else 0
+	dens_unit = format_unit(dens_exp, (dens_lunit, -3), style=unit_style)
+
+	# Check data
+	if any(val.shape != zval.shape for val in data.values()):
+		raise ValueError("Invalid shape for data values.")
+
+	alldata = {'z': zval, **data}
+	clabel = ['z'] + ['densz' if q == 'total' else f'densz_{q}' for q in data]
+	cunit = ['nm'] + [dens_unit for _ in data]
+	simple(filename, alldata, float_precision=precision, clabel=clabel, cunit=cunit)
+	return
+
+def densityz_multiple(filename, zval, data, xval=None, xlabel="x", xunit=""):
+	"""Multiple table files for density as function of z by column.
+
+	The columns are the z values followed by the values in data.
+
+	Arguments:
+	filename  Output filename
+	zval      Numpy array of dim 1. The z values.
+	data      dict instance. The values must be arrays of dimension 1 or 2.
+	xval      Numpy array. The values or labels to put on the x axis (column
+	          headers).
+	xlabel    String. The label (quantity) for the x values.
+	xunit     String. The unit for the x values.
+
+	Note:
+	xval, xlabel, and xunit are ignored for 1-dimensional data.
+	"""
+	unit_style = get_config('table_data_unit_style', choices=['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	precision = (8, 'g')  # TODO
+	dens_lunit = get_config('dos_unit')
+	dens_exp = 27 if dens_lunit == 'm' else 21 if dens_lunit == 'cm' else 0
+	dens_unit = format_unit(dens_exp, (dens_lunit, -3), style=unit_style)
+
+	filename_ending = filename[5:] if filename.startswith('densz') else filename
+	for q, value in data.items():
+		label = 'densz' if q == 'total' else f'densz_{q}'
+		fname = label + filename_ending
+		if value.ndim == 1:
+			simple(
+				fname, {'z': zval, label: value}, float_precision=precision,
+				clabel=['z', label], cunit=['nm', dens_unit]
+			)
+		elif value.ndim == 2:
+			clabel = f"{label}({xlabel}, z)"
+			if xval is None:
+				raise ValueError("Argument xval must be specified for 2-dim output")
+			simple2d(
+				fname, xval, zval, value, float_precision=precision,
+				clabel=clabel, axislabels=[xlabel, "z"], axisunits=[xunit, "nm"],
+				datalabel='rho', dataunit=dens_unit
+			)
+		else:
+			raise ValueError("Invalid dimension for value")
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/postwrite.py b/kdotpy-v1.0.0/src/kdotpy/tableo/postwrite.py
new file mode 100644
index 0000000000000000000000000000000000000000..e01ef6b9aa69682eecd2788dcae155ded6b9c9f6
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/postwrite.py
@@ -0,0 +1,252 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import os
+import sys
+import csv
+import tarfile
+from .tools import format_row
+
+try:
+	import zipfile
+	HAS_ZIPFILE = True
+except ImportError:
+	HAS_ZIPFILE = False
+
+### POST-WRITE FUNCTIONS ###
+
+def write_axislabels(filename, axislabels, axisunits = None, datalabel = None, dataunit = None, sep = '', widths = None):
+	"""Write axis labels in a 2- or 3-dim CSV output file, after the data has been written.
+
+	Arguments:
+	filename    String. The output file name.
+	axislabels  List of strings. Axis labels that should be written.
+	axisunits   List of strings or None. Unit labels that should be written.
+	datalabel   String or None. Data label that should be written at the end of
+	            the second row. If None, do not write a label.
+	dataunit    String or None. Unit associated to the data, written after the
+	            data label. If None, do not write a unit.
+	sep         String. Column separator.
+	widths      List of integers or None. If set, align to these column widths.
+	            If None, do not align.
+	"""
+	if not isinstance(axislabels, list) or len(axislabels) == 0:
+		raise TypeError("Argument axislabels must be a nonempty list of strings.")
+	columnlabel, *rowlabels = axislabels
+	if axisunits is None:
+		columnunit, rowunits = None, None
+	elif isinstance(axisunits, list):
+		if len(axisunits) != len(axislabels):
+			sys.stderr.write("Warning (tableo.write_axislabels): Argument axisunits must be a list of the same length as axislabels.\n")
+			columnunit, rowunits = None, None
+		else:
+			columnunit, *rowunits = axisunits
+	else:
+		raise TypeError("Argument axisunits must be a list or None")
+
+	try:
+		f = open(filename, 'r', encoding = 'utf-8')
+	except OSError:
+		sys.stderr.write("Warning (tableo.write_axislabels): File %s cannot be read\n" % filename)
+		return
+	contents = f.readlines()
+	f.close()
+
+	# Open file again for counting number of rows (if not an aligned file)
+	if widths is None:
+		with open(filename, 'r', encoding = 'utf-8') as csvfile:
+			csvreader = csv.reader(csvfile, delimiter = sep)
+			nrows = [len(row) for row in csvreader]
+	else:
+		nrows = [len(x) for x in contents]
+
+	# First row: first axis label and unit
+	if len(contents) < 2:
+		sys.stderr.write("Warning (tableo.write_axislabels): Missing or insufficient data in file %s\n" % filename)
+		return
+	contents[0] = contents[0].rstrip('\n')
+	contents[0] += sep + columnlabel
+	if columnunit is not None:
+		if widths is not None and datalabel is not None:
+			nsep = max(len(columnlabel), len(datalabel)) - len(columnlabel)
+			contents[0] += nsep * sep  # add extra sep for alignment
+		contents[0] += sep + columnunit
+	contents[0] += '\n'
+
+	# Second row: data label and unit
+	if datalabel is not None:
+		contents[1] = contents[1].rstrip('\n')
+		nsep = max(1 + nrows[1] - nrows[0], 1)
+		contents[1] += nsep * sep + datalabel
+		if dataunit is not None:
+			if widths is not None and axisunits is not None:
+				nsep = max(len(datalabel), len(columnlabel)) - len(datalabel)
+				contents[1] += nsep * sep  # add extra sep for alignment
+			contents[1] += sep + dataunit
+		contents[1] += '\n'
+
+	# Bottom: second, third, ... axis labels and units
+	if rowlabels:
+		contents.append(format_row(['%s' % l for l in rowlabels], sep, widths = widths) + '\n')
+		if rowunits is not None:
+			contents.append(format_row(['%s' % u for u in rowunits], sep, widths = widths) + '\n')
+
+	try:
+		f = open(filename, 'w', encoding = 'utf-8')
+	except OSError:
+		sys.stderr.write("Warning (tableo.write_axislabels): File %s cannot be written\n" % filename)
+		return
+
+	contents[-1] = contents[-1].rstrip('\n')  # do not write a newline at the end
+	f.writelines(contents)
+	f.close()
+	return
+
+
+def write_extraheader(filename, labels, row = 1, sep ='', widths = None):
+	"""Write extra header (column labels) after the data has been written.
+
+	Arguments:
+	filename  String. The output file name.
+	labels    List of strings. Labels that should be written.
+	row       Integer or 'end'. If an integer, insert labels at that row (1 is
+	          first row). If 'end' insert them at the bottom.
+	sep       String. Column separator.
+	widths    List of integers or None. If set, align to these column widths. If
+	          None, do not align.
+	"""
+	if not isinstance(labels, list):
+		raise TypeError("Argument labels must be a list instance")
+	if all(isinstance(l, list) for l in labels):
+		pass
+	elif all(isinstance(l, str) for l in labels):
+		labels = [labels]
+	else:
+		raise TypeError("Argument labels must be a list of strings or a list of lists")
+
+	try:
+		f = open(filename, 'r', encoding = 'utf-8')
+	except:
+		sys.stderr.write("Warning (tableo.write_extraheader): File %s cannot be read\n" % filename)
+		return
+	contents = f.readlines()
+	f.close()
+
+	if row == 'end':
+		row = len(contents)
+	elif row > len(contents):
+		sys.stderr.write("Warning (tableo.write_extraheader): Unable to write labels to file %s\n" % filename)
+		return
+
+	if widths is not None:
+		if any(any(len(l) > w for l, w in zip(label, widths)) for label in labels):
+			sys.stderr.write("Warning (tableo.write_extraheader): Some labels were truncated in file %s\n" % filename)
+			labels = [[l[:w] for l, w in zip(label, widths)] for label in labels]
+
+	ins_str = ''
+	for l in labels:
+		ins_str += format_row(l, sep, widths = widths) + '\n'
+	contents.insert(row, ins_str)
+
+	try:
+		f = open(filename, 'w', encoding = 'utf-8')
+	except:
+		sys.stderr.write("Warning (tableo.write_extraheader): File %s cannot be written\n" % filename)
+		return
+	try:
+		f.writelines(contents)
+	except UnicodeEncodeError:
+		sys.stderr.write("ERROR (tableo.write_extraheader): Encoding error. Unable to write labels to file %s\n" % filename)
+	f.close()
+	return
+
+
+def create_archive(archive_file, src_files, fmt = None):
+	"""Create an archive from a list of source files and delete the original files.
+	Use the modules tarfile and zipfile. If the archiving fails, then do not
+	delete the original files.
+
+	Arguments:
+	archive_file   String. Destination file name.
+	src_files      List of strings. The file names of the source files.
+	fmt            String. Which compression format.
+
+	Returns:
+	True on success, False on error.
+	"""
+	success = False
+	if len(src_files) == 0:
+		return False
+	elif fmt in ['tar', 'gz', 'gzip', 'targz', 'tar.gz']:
+		tar_mode = 'w:gz' if 'gz' in fmt else 'w'
+		# For gzip, use compresslevel = 6, which is a good compromise between compression and time needed (min = 1, max = 9)
+		tar_kwds = {'compresslevel': 6} if 'gz' in fmt else {}
+		try:
+			with tarfile.open(name = archive_file, mode = tar_mode, **tar_kwds) as tarf:
+				for f in src_files:
+					tarf.add(f)
+		except Exception as e:
+			sys.stderr.write("ERROR (tar): %s\n" % str(e))
+			sys.stderr.write("ERROR (create_archive): tar has failed, see preceding error message\n")
+		else:
+			success = True
+	elif fmt in ['zipnozip', 'zip']:
+		if not HAS_ZIPFILE:
+			sys.stderr.write("ERROR (create_archive): Python module 'zipfile' is not available\n")
+			return False
+		compression = zipfile.ZIP_STORED if fmt == 'zipnozip' else zipfile.ZIP_DEFLATED
+		try:
+			with zipfile.ZipFile(archive_file, 'w', compression = compression) as zipf:
+				for f in src_files:
+					zipf.write(f)
+		except Exception as e:
+			sys.stderr.write("ERROR (zip): %s\n" % str(e))
+			sys.stderr.write("ERROR (create_archive): zip has failed, see preceding error message\n")
+		else:
+			success = True
+	else:
+		sys.stderr.write("ERROR (create_archive): Invalid value for argument fmt\n")
+	if success:
+		for f in src_files:
+			try:
+				os.remove(f)
+			except OSError:
+				pass
+	return success
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/read.py b/kdotpy-v1.0.0/src/kdotpy/tableo/read.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7fadb33c471397f6a2434860f0435c6d6985180
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/read.py
@@ -0,0 +1,218 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import re
+import csv
+
+MAX_HEADER_ROWS = 10
+MAX_FOOTER_ROWS = 10
+
+def read_aligned_table(filename, spacechar = ' '):
+	"""Read CSV file with 'aligned table'
+
+	Algorithm:
+	Look for columns with spaces, i.e., positions which contain a space
+	character in each line.
+
+	Note:
+	The table must be aligned properly. Even if a single line does not align
+	properly, this function may fail.
+
+	Arguments:
+	filename   String. The input file name.
+	spacechar  String. Character that should be considered as space (i.e.,
+	           alignment character).
+
+	Returns:
+	List of list containing the non-space data, split by the spaces.
+	"""
+	try:
+		f = open(filename, 'r')
+	except OSError:
+		sys.stderr.write("ERROR (read_aligned_table): File '%s' does not exist or cannot be read.\n" % filename)
+		return None
+	spaces = []
+	for line in f:
+		ln = line.strip('\n')
+		this_spaces = [x in spacechar for x in ln]
+		l1 = len(spaces)
+		l2 = len(this_spaces)
+		if l1 >= l2:
+			spaces = [s1 and s2 for s1, s2 in zip(spaces, this_spaces)] + spaces[l2:]
+		else:
+			spaces = [s1 and s2 for s1, s2 in zip(spaces, this_spaces)] + this_spaces[l1:]
+	f.close()
+
+	col_start = [0] if not spaces[0] else []
+	col_end = []
+	for j in range(1, len(spaces)):
+		if spaces[j-1] and (not spaces[j]):
+			col_start.append(j)
+		elif (not spaces[j-1]) and spaces[j]:
+			col_end.append(j)
+	if not spaces[-1]:
+		col_end.append(len(spaces))
+	if len(col_start) != len(col_end):
+		raise ValueError
+	if len(col_start) == 1:
+		sys.stderr.write("ERROR (read_aligned_table): File '%s' is not a properly aligned table.\n" % filename)
+		return None
+
+	try:
+		f = open(filename, 'r')
+	except OSError:
+		sys.stderr.write("ERROR (read_aligned_table): File '%s' does not exist or cannot be read.\n" % filename)
+		return None
+	rawdata = []
+	for line in f:
+		ln = line.strip('\n')
+		l1 = len(ln)
+		thisdata = []
+		for s, e in zip(col_start, col_end):
+			if s >= l1:
+				break
+			thisdata.append(ln[s: min(e, l1)].strip())
+		rawdata.append(thisdata)
+	f.close()
+	return rawdata
+
+def read_csv(filename):
+	"""Read csv (comma separated value) file.
+	We use the csv module from Python. We expect that the separation character
+	is a comma. The function uses several heuristics to split into header, data,
+	and footer parts. Together (in this order) they form the complete data
+	present in the file.
+
+	Note:
+	If no columns are detected, then try the read_aligned_table function.
+
+	Argument:
+	filename   String. The input file name.
+
+	Returns:
+	data    List of list of strings. The raw data for the rows that have been
+	        recognised as proper 'data'.
+	header  List of list of strings. The column headers. This may consist of 0,
+	        1, 2, or more rows (maximally MAX_HEADER_ROWS).
+	footer  List of list of strings. The column footers. This may consist of 0,
+	        1, 2, or more rows (maximally MAX_FOOTER_ROWS).
+	"""
+	try:
+		f = open(filename, 'r', newline='')
+	except OSError:
+		sys.stderr.write("ERROR (read_csv): File '%s' does not exist or cannot be read.\n" % filename)
+		return None, None, None
+	csvreader = csv.reader(f)
+	rawdata = [row for row in csvreader]
+	f.close()
+	if max((len(row) for row in rawdata), default=0) < 2:
+		rawdata = read_aligned_table(filename)
+		if rawdata is None:
+			return None, None, None
+
+	# determine number of columns and rows
+	ncol = max([len(row) for row in rawdata])
+	nrow = len(rawdata)
+
+	re_isfloat = re.compile(r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?\s*')
+	is_numeric = np.array([[re_isfloat.fullmatch(x) for x in row] + [False] * (ncol - len(row)) for row in rawdata], dtype = bool)
+	is_empty = np.array([[len(x) == 0 for x in row] + [True] * (ncol - len(row)) for row in rawdata], dtype = bool)
+	is_text = ~is_numeric & ~is_empty
+	col_numeric = np.sum(is_numeric, axis = 0)
+	col_text = np.sum(is_text, axis = 0)
+	col_empty = np.sum(is_empty, axis = 0)
+	ncol_numeric = 0
+	ncol_text = 0
+	for c in range(0, ncol):
+		if col_text[c] >= 4:
+			ncol_text += 1
+		elif col_numeric[c] >= 4:
+			ncol_numeric += 1
+		# per-column type would be 'text'/'numeric'/'empty' here; value was never used
+
+	row_numeric = np.sum(is_numeric, axis = 1)
+	row_text = np.sum(is_text, axis = 1)
+	non_data_rows = []
+	for r in range(0, nrow):
+		if row_text[r] > ncol_text:
+			non_data_rows.append(r)
+	header_rows = [r for r in non_data_rows if r < MAX_HEADER_ROWS and r < nrow - 2]
+	data_starts = 0 if header_rows == [] else max(header_rows) + 1
+	footer_rows = [r for r in non_data_rows if r >= nrow - MAX_FOOTER_ROWS and r > data_starts + 1]
+	data_ends = nrow if footer_rows == [] else min(footer_rows)
+
+	return rawdata[data_starts:data_ends], rawdata[:data_starts], rawdata[data_ends:]
+
+def read_csv_dict(filename):
+	"""Read csv (comma separated value) file and return a dict with the data.
+	This uses read_csv(). See documentation for that function for more
+	information on how the file is 'parsed'.
+
+	Argument:
+	filename   String. The input file name.
+
+	Returns:
+	data_dict   A dict instance, whose keys are the column headers and whose
+	            data is a list of strings, representing the raw data. The keys
+	            are taken from the first valid header or footer row.
+	"""
+	try:
+		data, header, footer = read_csv(filename)
+	except Exception:
+		return None
+	if data is None or len(data) == 0:
+		return None
+	data_dict = {}
+	if len(header) + len(footer) == 0:
+		ncol = max([len(row) for row in data])
+		for c in range(0, ncol):
+			data_dict[c] = [row[c] for row in data]
+	else:
+		header_full = header + footer
+		if len(header_full) == 1:
+			colheadings = header_full[0]
+		else:
+			colheadings = [h for h in zip(*header_full)]
+		for c in range(0, len(colheadings)):
+			data_dict[colheadings[c]] = [row[c] for row in data]
+	return data_dict
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/simple.py b/kdotpy-v1.0.0/src/kdotpy/tableo/simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..1527c0248272e83aa4a33303649db895591833ea
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/simple.py
@@ -0,0 +1,146 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+
+from .tools import get_column_headings, get_unit_headings, float_format, get_format
+from .write import write
+from ..momentum import Vector, VectorGrid
+
+def simple(filename, data, float_precision = 5, clabel = None, cunit = None):
+	"""Write a 'simple' table.
+	This plots a rectangular array of data, with appropriate column headers.
+
+	Arguments:
+	filename         String. The output file name.
+	data             Array of two dimensions.
+	float_precision  Integer, string, or 2-tuple. If integer, number of digits
+	                 in floating point output (format %f), if a string, use that
+	                 as formatter for floats. If a tuple (int, str), the int is
+	                 the precision and str the formatter (for example 'e', 'f',
+	                 or 'g').
+	clabel           String or list of strings. Labels for the data columns.
+	cunit            String, list of strings or None. Units associated to the
+	                 columns. If a single string, use the same unit for all data
+	                 columns (not the column for z). If None, do not output
+	                 units.
+
+	No return value.
+	"""
+	columns = get_column_headings(clabel, ncol=len(data))
+	units = get_unit_headings(cunit, ncol=len(data))
+	formats = [get_format(c, float_precision) for c in columns]
+	write(filename, data, formats, columns=columns, units=units)
+	return
+
+def simple2d(filename, xval, yval, data, float_precision = 5, clabel = None, axislabels = None, axisunits = None, datalabel = None, dataunit = None):
+	"""Write a 'simple' two-dimensional array.
+	This function writes the values of a function f(x, y) in a rectangular
+	array. The x values are written in the first row, the y values in the first
+	column.
+
+	Arguments:
+	filename         String. The output file name.
+	xval             Array of one dimension. The x values, written as column
+	                 headers, i.e., as first row.
+	yval             Array of one dimension. The y values, written as row
+	                 labels, i.e., as first column.
+	data             Array of two dimensions. The values f(x, y).
+	float_precision  Integer. Number of digits for floating point numbers.
+	clabel           String. Label that is printed in the upper-left corner
+	                 (first row, first column).
+	axislabels       List of string or None. The labels of the x and y axes.
+	                 These are written at the right end of the first row and the
+	                 bottom end of the first column, respectively. If None, do
+	                 not write these labels.
+	axisunits        List of strings or None. Units associated to the x and y
+	                 axes. These are written at the right end of the first row
+	                 and the bottom end of the first column (i.e., right of and
+	                 below the axes labels), respectively. If None, do not write
+	                 the units.
+	datalabel        String or None. Label that is written at the right end of
+	                 the second row. This should typically be the quantity that
+	                 the data represents. If None, do not write a label.
+	dataunit         String or None. Unit associated to the data. This is
+	                 printed on the second row after the data label.
+
+	No return value.
+	"""
+	if not (isinstance(clabel, str) or clabel is None):
+		raise TypeError("Argument clabel must be a string or None")
+	data = np.asarray(data)
+	if data.ndim != 2:
+		sys.stderr.write("ERROR (tableo.simple2d): Input data is not a 2-dim array.\n")
+		return
+	if data.shape[0] == len(xval) and data.shape[1] == len(yval):
+		data = data.T
+	elif data.shape[0] == len(yval) and data.shape[1] == len(xval):
+		pass
+	else:
+		raise ValueError("Shapes of data, xval, yval do not match")
+
+	if isinstance(xval, VectorGrid):
+		xval = xval.get_values(None)
+	elif isinstance(xval, (list, np.ndarray)) and len(xval) > 0 and isinstance(xval[0], Vector):
+		xval = np.array([k.len() for k in xval])
+
+	## Set formats
+	with np.errstate(divide='ignore'):  # catches 'divide by zero' warning from log10(0)
+		data_size = 0 if np.isnan(data).all() else int(max(np.floor(np.log10(np.nanmax(np.abs(data)))), 0))
+		x_size = 0 if np.isnan(xval).all() else int(max(np.floor(np.log10(np.nanmax(np.abs(xval)))), 0))
+		y_size = 0 if np.isnan(yval).all() else int(max(np.floor(np.log10(np.nanmax(np.abs(yval)))), 0))
+	data_fmt = float_format(float_precision, delta=-data_size)
+	x_fmt = float_format(float_precision, delta=-x_size)
+	y_fmt = float_format(float_precision, delta=-y_size)
+
+	## Compose first column to data and write
+	labeltxt = "" if clabel is None else clabel
+	columns = [x_fmt % x for x in xval]
+	index = [y_fmt % y for y in yval]
+	formats = [data_fmt] * len(xval)
+
+	## Write file
+	write(
+		filename, data.T, formats, columns=columns, index=index,
+		label_text=labeltxt, axislabels=axislabels, axisunits=axisunits,
+		datalabel=datalabel, dataunit=dataunit
+	)
+	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/tools.py b/kdotpy-v1.0.0/src/kdotpy/tableo/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..26eaacaed4f8440aec59b47a3e509a0bdbd2a3e3
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/tools.py
@@ -0,0 +1,347 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+from ..config import get_config, get_config_int
+from ..phystext import format_vector_q, format_vector_unit
+from ..observables import all_observables
+try:
+	import pandas as pd  # noqa: F401 # Ignore import but unused.
+	pd_ver = pd.__version__
+except Exception:
+	HAS_PD = False
+else:
+	HAS_PD = pd_ver.split('.')[0].isdigit() and int(pd_ver.split('.')[0]) >= 1
+
+vector_components = ['', 'x', 'y', 'z', 'phi', 'theta']
+vector_labels = ['k', 'b']
+vector_quantities = [vl + vc for vl in vector_labels for vc in vector_components]
+
+def get_csv_style(key = 'csv_style'):
+	"""Get CSV style from configuration."""
+	# Add xlsx case, fall back if no pandas available
+	csvstyle = get_config(key, choices = ['align', 'csv', 'csvinternal', 'csvpandas']).lower()
+	if csvstyle not in ['align', 'csv', 'csvinternal', 'csvpandas']:
+		csvstyle = 'csvpandas' if HAS_PD else 'csvinternal'
+	elif csvstyle == 'csv':
+		csvstyle = 'csvpandas' if HAS_PD else 'csvinternal'
+	elif csvstyle == 'csvpandas' and not HAS_PD:
+		csvstyle = 'csvinternal'
+		sys.stderr.write("Warning (tableo.get_csv_style): Requested csv style 'csvpandas' cannot be used because pandas is missing. Fall back to '%s'.\n" % csvstyle)
+	return csvstyle
+
+def get_label_unit_style():
+	"""Get label and unit style for tables from the configuration"""
+	label_style = get_config('table_data_label_style', choices = ['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	unit_style = 'none' if label_style == 'none' else get_config('table_data_unit_style', choices = ['none', 'false', 'raw', 'plain', 'unicode', 'tex'])
+	return label_style, unit_style
+
+def get_precision(key, default = 0):
+	"""Get precision for table output from configuration."""
+	precision = get_config_int(key)
+	if precision < 0:
+		sys.stderr.write("Warning (get_precision): Precision for table output (option '%s') must be an integer >= 0.\n" % key)
+		precision = default
+	return precision
+
+def parse_float_precision(float_precision):
+	"""Extract float precision and style from a float_precision value
+
+	Arguments:
+	float_precision  Integer, string, or tuple (int, str).
+
+	Returns:
+	float_n   Integer float precision in number of digits.
+	float_f   String of length 1. Format specifier.
+	"""
+	if isinstance(float_precision, int):
+		float_n, float_f = float_precision, 'f'
+	elif isinstance(float_precision, str):
+		float_n, float_f = None, float_precision
+	elif isinstance(float_precision, tuple) and len(float_precision) == 2:
+		if isinstance(float_precision[0], int) and isinstance(float_precision[1], str):
+			float_n, float_f = float_precision
+		else:
+			raise ValueError("If argument float_precision is a tuple, it must be of the form (int, str).")
+	else:
+		raise TypeError("Argument float_precision must be an integer, string, or tuple")
+	if float_f.lower() not in ['e', 'f', 'g']:
+		sys.stderr.write("Warning (parse_float_precision): Only the formats e, f, g are suitable for floating point values.\n")
+	return float_n, float_f
+
+def float_format(float_precision, delta = 0):
+	"""Get % style formatting string for floating point numbers
+
+	Argument:
+	float_precision  Integer, string, or tuple (int, str).
+	delta            Integer. If the number of digits is defined by
+	                 float_precision, change it by this number. Use negative
+	                 values to decrease the number of digits.
+	"""
+	float_n, float_f = parse_float_precision(float_precision)
+	if float_n is None:
+		return "%%%s" % float_f
+	else:
+		n = max(float_n + delta, 0)
+		return "%%.%i%s" % (n, float_f)
+
def get_format(q, float_precision = 5, degrees = True):
	"""Get data formats for tables.

	We use % style formatting rather than {} style formatting because the former
	is about 30% faster. Note that for dispersion tables, there is a custom
	function defined in tableo.disp.

	Arguments:
	q                String. Quantity (column id).
	float_precision  Integer. Number of digits for floating point numbers.
	degrees          True or False. Whether to express angles in degrees (True)
	                 or radians (False).

	Returns:
	fmt   The format string, to be used as fmt % value.
	"""
	if not isinstance(q, str):
		raise TypeError("Argument q must be a string")
	# Non-float columns: integer indices and string-valued quantities.
	if q.endswith('index'):
		return '%i'
	if q in ('char', 'minmax'):
		return '%s'
	# Floating-point columns: some quantities get fewer digits than default.
	if q.startswith('E') or q == 'z':
		delta = -2
	elif q.startswith('exch_y'):
		delta = -1
	elif degrees and q in ('kphi', 'bphi', 'ktheta', 'btheta'):
		delta = -2
	else:
		delta = 0
	return float_format(float_precision, delta=delta)
+
def bandlabel_to_fileid(bandlabel):
	"""Convert band label (band index or LL + band index) to label for filename.

	Arguments:
	bandlabel   Integer (band index) or 2-tuple (LL index, band index).

	Returns:
	String. 'b%+i' for an integer, 'll%+i-b%+i' for a 2-tuple, and the
	placeholder 'xx' for anything else.
	"""
	if isinstance(bandlabel, tuple) and len(bandlabel) == 2:
		return "ll%+i-b%+i" % bandlabel
	if isinstance(bandlabel, (int, np.integer)):
		return "b%+i" % bandlabel
	return "xx"
+
def get_column_headings(clabel, ncol):
	"""Parse clabel input to column headers for label (quantity)

	Arguments:
	clabel   String, list, or other. A string yields numbered headings
	         'clabel0', 'clabel1', ...; a list of length ncol is used as is
	         (kept untouched if it contains tuples, stringified otherwise);
	         anything else yields generic headings 'q0', 'q1', ... plus a
	         warning.
	ncol     Integer. Number of columns.

	Returns:
	List of column headings.
	"""
	if isinstance(clabel, str):
		return ["%s%i" % (clabel, i) for i in range(ncol)]
	if isinstance(clabel, list) and len(clabel) == ncol:
		# Tuples indicate multi-part headings; keep the list as is then.
		if any(isinstance(col, tuple) for col in clabel):
			return clabel
		return ["%s" % s for s in clabel]
	sys.stderr.write("Warning (tableo.get_column_headings): Column headings could not be determined.\n")
	return ["q%i" % i for i in range(ncol)]
+
def get_unit_headings(cunit, ncol):
	"""Parse cunit input to column headers for units

	Arguments:
	cunit   String, list, or other. A string is repeated for all columns; a
	        list of length ncol is used as is (kept untouched if it contains
	        tuples, stringified otherwise); anything else yields None.
	ncol    Integer. Number of columns.

	Returns:
	List of unit headings, or None if cunit could not be interpreted.
	"""
	if isinstance(cunit, str):
		return [cunit] * ncol
	if isinstance(cunit, list) and len(cunit) == ncol:
		# Tuples indicate multi-part headings; keep the list as is then.
		if any(isinstance(col, tuple) for col in cunit):
			return cunit
		return ["%s" % s for s in cunit]
	return None
+
def get_column_widths(data, formats, *args, index=None):
	"""Calculate column widths needed for aligned table output.

	Arguments:
	data     List of columns or dict (its values are used). Each column is an
	         iterable of values.
	formats  List of %-style format strings, one per column. If index columns
	         are given and formats matches the data columns only, '%s' formats
	         are prepended for the index columns.
	*args    Additional tuples, lists, or arrays of strings (e.g. column
	         headings) whose lengths must also fit within the column widths.
	         Each must match either the total number of columns or len(data);
	         None entries are skipped.
	index    List of index columns or None.

	Returns:
	widths   List of integers. Maximum formatted width of each column.

	Raises:
	ValueError if lengths of data, index, formats, or any extra argument are
	incompatible. TypeError if an extra argument is not a tuple, list, array.
	"""
	if isinstance(data, dict):
		data = list(data.values())
	if index is None:
		index = []
	if len(index) > 0 and len(data) == len(formats):
		formats = ["%s"] * len(index) + formats
	if len(data) + len(index) != len(formats):
		raise ValueError(f"Arguments data, index, and formats have incompatible lengths, {len(data)} + {len(index)} != {len(formats)}")

	widths = []
	for fmt, col in zip(formats, (*index, *data)):
		widths.append(max(len(fmt % x) for x in col))

	for arg in args:
		if arg is None:
			continue
		if not isinstance(arg, (tuple, list, np.ndarray)):
			raise TypeError("Argument must be a tuple, list, or array")
		if len(arg) == len(widths):
			widths = [max(w, len(col)) for w, col in zip(widths, arg)]
		elif len(arg) == len(data):
			# Pad with empty labels for the index columns. Convert arg to a
			# list so that tuple and array arguments do not raise a TypeError
			# on the concatenation.
			extarg = [""] * len(index) + list(arg)
			widths = [max(w, len(col)) for w, col in zip(widths, extarg)]
		else:
			raise ValueError("All arguments must have the same length")
	return widths
+
def format_column_headings(columns, widths = None, where = None):
	"""Format column headings

	Arguments:
	columns   List of strings. Column labels.
	widths    List of integers or None. If set, align to these column widths. If
	          None, do not align.
	where     'l' ,'r', 'c', 'left', 'right', 'center', or None. Where to align
	          the column labels. None is equivalent to 'l'.

	Returns:
	colheadings   List of strings. The column labels, padded with whitespace
	              for alignment when widths is set. The string that should be
	              written to the output file is sep.join(colheadings), where
	              sep is the column separator.

	Raises:
	ValueError for an invalid value of argument where.
	"""
	# Treat all-zero widths the same as widths = None: no alignment.
	if widths is None or (isinstance(widths, list) and all([w == 0 for w in widths])):
		return columns
	if where is None or where in ['l', 'left']:
		align = str.ljust
	elif where in ['r', 'right']:
		align = str.rjust
	elif where in ['c', 'center']:
		align = str.center
	else:
		raise ValueError("Invalid value for argument 'where'")
	return [align(c, w) for c, w in zip(columns, widths)]
+
def format_row(columns, sep, quote = '\"', widths = None, where = None):
	"""Format one row in an array

	columns   List of strings. The entries to be printed.
	sep       String. Column separator.
	quote     String. The string that serves as quote character, used when the
	          data contains sep.
	widths    List of integers or None. If set, align to these column widths. If
	          None, do not align.
	where     'l' ,'r', 'c', 'left', 'right', 'center', or None. Where to align
	          the column labels. None is equivalent to 'l'.

	Returns:
	A string.
	"""
	if widths is not None and not (isinstance(widths, list) and all([w == 0 for w in widths])):
		# Aligned output: delegate padding to format_column_headings().
		return sep.join(format_column_headings(columns, widths = widths, where = where))
	# Unaligned output: quote any entry containing the separator, doubling
	# embedded quote characters (CSV style).
	entries = []
	for c in columns:
		if sep in c:
			entries.append(quote + c.replace(quote, quote + quote) + quote)
		else:
			entries.append(c)
	return sep.join(entries)
+
def format_quantity_and_unit(q, style=None, unit_style=None, dimful=None, degrees=True):
	"""Format quantity and its unit

	Arguments:
	q            String. Quantity, i.e., a column header in raw format.
	style        String (one of 'raw', 'plain', 'unicode', 'tex') or None. If
	             set, the formatting style for vector components and
	             observables. If None, extract it from the configuration value.
	unit_style   String (one of 'raw', 'plain', 'unicode', 'tex') or None. If
	             set, the formatting style for units. If None, extract it from
	             the configuration value.
	dimful       True, False, or None. If None, take it from all_observables.
	degrees      True or False. Determines the unit for the angular vector
	             components.

	Returns:
	qstr   String. Formatted quantity label.
	ustr   String. Formatted unit.
	"""
	if style is None:
		style = get_config('table_dispersion_obs_style', choices=['raw', 'plain', 'unicode', 'tex'])
	if unit_style is None:
		unit_style = get_config('table_dispersion_unit_style', choices=['raw', 'plain', 'unicode', 'tex'])
	if dimful is None:
		dimful = all_observables.dimful is True
	if q in ['e', 'E'] or q is None:
		qstr = "$E$" if style == 'tex' else "E"
		ustr = r"$\mathrm{meV}$" if unit_style == 'tex' else "meV"
	elif q in vector_quantities:
		qstr = format_vector_q(q, style=style)
		# Format the unit with unit_style (not style), consistent with the
		# other branches and with vector_units().
		ustr = format_vector_unit(q, style=unit_style, degrees=degrees)
	elif q in all_observables:
		obs = all_observables[q]
		qstr = obs.to_str(style=style, dimful=dimful, index_from=q)
		ustr = obs.get_unit_str(style=unit_style, dimful=dimful)
	elif q.startswith('mass'):
		# Effective mass labels: 'mass', or 'mass' plus component suffix.
		if style == 'tex':
			qstr = "$m^*$" if len(q) == 4 else f"$m^*_{q[4]}$" if len(q) == 5 else f"$m^*_{{{q[4:]}}}$"
			ustr = "$m_0$"
		elif style == 'plain':
			qstr, ustr = "m*" + q[4:], "m_0"
		elif style == 'unicode':
			qstr, ustr = "m*" + q[4:], "m\u2080"
		else:
			qstr, ustr = q, "m0"
	else:
		# Unknown quantity: pass through unchanged, without a unit.
		qstr, ustr = q, ""
	return qstr, ustr
+
def vector_units(comp, degrees = True):
	"""Wrapper around phystext.format_vector_unit()

	Arguments:
	comp     String or list/tuple/array of strings. Vector component(s).
	degrees  True or False. Whether angular components are given in degrees.

	Returns:
	Formatted unit string, or a list thereof if comp is a sequence.

	Raises:
	TypeError if comp is neither a string nor a sequence.
	"""
	unit_style = get_config('table_dispersion_unit_style', choices = ['raw', 'plain', 'unicode', 'tex'])
	if isinstance(comp, (list, tuple, np.ndarray)):
		return [format_vector_unit(co, style=unit_style, degrees=degrees) for co in comp]
	if isinstance(comp, str):
		return format_vector_unit(comp, style=unit_style, degrees=degrees)
	raise TypeError("Argument comp must be str or list, tuple, array")
+
def get_bandlabel_position():
	"""Get position of band label in the CSV file (which row) from configuration.

	Returns:
	Integer 0 (top row), integer 1 (second row), or the string 'end' (bottom
	row). Falls back to 0 with a warning for absent or invalid configuration.
	"""
	# Include 'bottom' in the choices: it is a valid value, handled below
	# alongside 'below'.
	pos = get_config('csv_bandlabel_position', choices = ['top', 'above', 'second', 'between', 'bottom', 'below'])
	if pos is None:
		sys.stderr.write("Warning (get_bandlabel_position): Absent configuration value 'csv_bandlabel_position'. Using default value 'top'.\n")
		return 0
	if pos.lower() in ['top', 'above']:
		return 0
	elif pos.lower() in ['second', 'between']:
		return 1
	elif pos.lower() in ['bottom', 'below']:
		return 'end'
	else:
		sys.stderr.write("Warning (get_bandlabel_position): Invalid configuration value '%s' for 'csv_bandlabel_position'. Using default value 'top'.\n" % pos)
		return 0
+
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/wf.py b/kdotpy-v1.0.0/src/kdotpy/tableo/wf.py
new file mode 100644
index 0000000000000000000000000000000000000000..256115615e1c266ea6bac27fc26a689019540f73
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/wf.py
@@ -0,0 +1,514 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import os
+
+import numpy as np
+
+from ..config import get_config_int, get_config
+from ..phystext import orbital_labels
+from .auxil import q_z
+from .simple import simple, simple2d
+from .postwrite import create_archive
+
+### HELPER FUNCTION ###
+
def get_unique_filenames(filenames, fmt="_%i"):
	"""Make a list of unique filenames by appending fmt (containing an integer number)

	Arguments:
	filenames   List of strings. May contain duplicate entries.
	fmt         String. %-style format with a single integer field, appended
	            to duplicated names.

	Returns:
	List of strings of the same length, where duplicated entries carry a
	counter suffix so that all entries are unique.
	"""
	unique_filenames = []
	counter = {}
	for fn in filenames:
		if filenames.count(fn) > 1:
			counter[fn] = counter.get(fn, 0) + 1
			candidate = fn + (fmt % counter[fn])
			# Skip counter values whose result collides with a name already
			# present in the input or already generated; otherwise an input
			# like ['a', 'a', 'a_1'] would yield a duplicate 'a_1'.
			while candidate in filenames or candidate in unique_filenames:
				counter[fn] += 1
				candidate = fn + (fmt % counter[fn])
			unique_filenames.append(candidate)
		else:
			unique_filenames.append(fn)
	return unique_filenames
+
+### WAVE FUNCTION TABLES ###
+
def wavefunction_z(params, diagdatapoint, filename = "", absval = False, title = None, eivalrange = None, bandlabels = None, display_k = None, precision = None):
	"""Table of wave functions psi(z), wrapper version.
	For each state, provide a separate file. In each file, the first column is
	the z value, the subsequent ones the real and imaginary parts of the wave
	function value in each orbital.

	Note:
	The configuration setting 'table_wf_files' may be used to disable this
	function or to gather all csv files into a tar or a zip file.

	Arguments:
	params         PhysParams instance. Used to extract nz, ny, resolutions and
	               number of orbitals.
	diagdatapoint  DiagDataPoint instance.
	filename       String. The output file name.
	absval         NOT USED
	title          NOT USED
	eivalrange     2-tuple or None. If set, write wave function data only for
	               states with eigenvalue in that range.
	bandlabels     NOT USED
	display_k      Vector or None. If set, test whether the vector is zero. If
	               not, suppress character warning. (NOT USED?)
	precision      Integer or None. Number of digits for floating point numbers.
	               If None, use the configuration value.

	No return value.
	"""
	if precision is None:
		precision = get_config_int('table_wf_precision', minval = 2)
	nz = params.nz
	ny = params.ny
	norb = params.norbitals
	# suppress_character_warning = (isinstance(display_k, Vector) and not display_k == 0)
	ddp1 = diagdatapoint.sort_by_eival()
	wf_format = get_config('table_wf_files', choices = ['none', 'csv', 'tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip'])
	fname, fext = os.path.splitext(filename)
	all_files = []
	if wf_format == 'none':  # skip writing csv files
		return
	if isinstance(eivalrange, (list, tuple)) and len(eivalrange) == 2:
		emin, emax = min(eivalrange), max(eivalrange)
	else:
		emin, emax = -np.inf, np.inf

	# Infer the dimensionality from the eigenvector length.
	if ddp1.neig != ddp1.eivec.shape[1]:
		raise ValueError(f"Invalid shape for ddp.eivec. It does not match ddp.neig = {ddp1.neig}.")
	if ddp1.eivec.shape[0] == norb * ny * nz:  # for 1D
		dim = 1
	elif ddp1.eivec.shape[0] == norb * nz:  # for 2D
		dim = 2
		ny = 1
	else:
		raise ValueError("Eigenvectors have incorrect number of components.")

	# Build one file name per state, preferring character labels, then band
	# indices, then plain enumeration. Out-of-range states get an empty name
	# placeholder (they are skipped below).
	filenames = []
	for j in range(0, ddp1.neig):
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			filenames.append("")
			continue
		if ddp1.llindex is not None and ddp1.char is not None and '?' not in ddp1.char[j]:
			bandlabel = "%s.%i" % (ddp1.char[j], ddp1.llindex[j])
		elif ddp1.char is not None and '?' not in ddp1.char[j]:
			bandlabel = ddp1.char[j]
		elif ddp1.llindex is not None and ddp1.bindex is not None:
			bandlabel = "%i.%i" % (ddp1.llindex[j], ddp1.bindex[j])
		elif ddp1.bindex is not None:
			bandlabel = "%i" % ddp1.bindex[j]
		else:
			bandlabel = "(%i)" % j
		filenames.append(f"{fname}.{bandlabel}")
	filenames = [f"{fn}{fext}" for fn in get_unique_filenames(filenames)]

	for j in range(0, ddp1.neig):
		eivec = ddp1.eivec[:, j]
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			continue

		if dim == 1 and ny > 1:  # for 1D
			eivec0 = np.reshape(eivec, (ny, norb * nz))
			eivec = eivec0[ny // 2, :]  # take a section in the middle
			nrm = np.vdot(eivec, eivec)
			eivec /= np.sqrt(nrm)
			# print (eivec.shape)

		wfdata = []
		orblabels = orbital_labels(style = 'unicode', norb = norb)
		heading = []
		subheading = []
		# Try to make largest component purely real
		psimax = eivec[np.argmax(np.abs(eivec))]
		phase = psimax / abs(psimax)

		for b in range(0, norb):
			psi = eivec[b::norb]
			# np.vdot() yields a complex scalar for complex input; take the
			# real part (the squared norm), since an ordering comparison on a
			# complex value raises a TypeError.
			psi2 = np.real(np.vdot(psi, psi))
			# Skip orbitals with negligible weight (below the output precision)
			if precision is not None and psi2 < 10**-precision:
				continue
			wfdata.append(np.real(psi / phase))
			wfdata.append(np.imag(psi / phase))
			heading.append(orblabels[b])
			heading.append(orblabels[b])
			subheading.append("Re \u03c8_i")  # Re psi_i
			subheading.append("Im \u03c8_i")  # Im psi_i

		wfdata = np.array(wfdata)
		q_z(filenames[j], params, wfdata, clabel = heading, units = subheading, precision = precision)
		all_files.append(filenames[j])

	if len(all_files) == 0:
		sys.stderr.write("Warning (tableo.wavefunction_z): No output files have been written.\n")
	elif wf_format in ['tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip']:
		# Gather the per-state csv files into a single archive if requested.
		archive_file = fname + ("--csv.zip" if 'zip' in wf_format else "--csv.tar.gz" if 'gz' in wf_format else "--csv.tar")
		create_archive(archive_file, all_files, fmt = wf_format)
	return
+
+
def abs_wavefunctions_z(params, diagdatapoint, filename = "", absval = False, title = None, eivalrange = None, num = None, bandlabels = None, display_k = None, precision = None):
	"""Table of wave functions |psi(z)|^2, wrapper version.
	Each column represents a wave function, i.e., its probability density at z.
	(This function provides a single file, unlike wavefunctions_z().)

	Arguments:
	params         PhysParams instance. Used to extract nz, ny, resolutions and
	               number of orbitals.
	diagdatapoint  DiagDataPoint instance.
	filename       String. The output file name.
	absval         NOT USED
	title          NOT USED
	eivalrange     2-tuple or None. If set, write wave function data only for
	               states with eigenvalue in that range.
	num            Integer or None. If set, write wave function data only for
	               this many states near the middle of the eivalrange.
	bandlabels     NOT USED
	display_k      Vector or None. If set, test whether the vector is zero. If
	               not, suppress character warning. (NOT USED?)
	precision      Integer or None. Number of digits for floating point numbers.
	               If None, use the configuration value.

	No return value.
	"""
	if precision is None:
		precision = get_config_int('table_wf_precision', minval = 2)
	nz = params.nz
	ny = params.ny
	dz = params.zres
	norb = params.norbitals
	ddp1 = diagdatapoint.sort_by_eival()
	if isinstance(eivalrange, (list, tuple)) and len(eivalrange) == 2:
		emin, emax = min(eivalrange), max(eivalrange)
	else:
		emin, emax = -np.inf, np.inf

	# Infer the dimensionality from the eigenvector length.
	if ddp1.neig != ddp1.eivec.shape[1]:
		raise ValueError(f"Invalid shape for ddp.eivec. It does not match ddp.neig = {ddp1.neig}.")
	if ddp1.eivec.shape[0] == norb * ny * nz:  # for 1D
		dim = 1
	elif ddp1.eivec.shape[0] == norb * nz:  # for 2D
		dim = 2
		ny = 1
	else:
		raise ValueError("Eigenvectors have incorrect number of components.")

	# Select states: sort by distance to a reference energy e0, optionally
	# restrict to the eigenvalue range and to at most num states, then put
	# them back in ascending energy order.
	if eivalrange is None:
		e0 = 0.0
		sel = np.argsort(np.abs(ddp1.eival - e0))  # [:min(neig, num)]
		# Guard against num being None (no maximum number requested).
		if num is not None and num < len(sel):
			sel = sel[:num]   # restrict to maximum number
		order = np.argsort(ddp1.eival[sel])
		sel = sel[order]
	else:
		e0 = (emin + emax) / 2
		sel = np.argsort(np.abs(ddp1.eival - e0))  # sort by distance to e0
		sel = sel[(ddp1.eival[sel] >= emin) & (ddp1.eival[sel] <= emax)]  # restrict to eigenvalue range
		if num is not None and num < len(sel):
			sel = sel[:num]   # restrict to maximum number
		order = np.argsort(ddp1.eival[sel])
		sel = sel[order]

	if len(sel) == 0:
		sys.stderr.write("Warning (tableo.abs_wavefunctions_z): No eigenstates to be output.\n")
		return

	wfdata = []
	heading = []
	subheading = []

	for j in sel:
		eivec = ddp1.eivec[:, j]
		energy = ddp1.eival[j]

		if dim == 1 and ny > 1:  # for 1D
			eivec0 = np.reshape(eivec, (ny, norb * nz))
			eivec = eivec0[ny // 2, :]  # take a section in the middle
			nrm = np.vdot(eivec, eivec)
			eivec /= np.sqrt(nrm)

		# Probability density per z coordinate, summed over orbitals.
		eivec2 = np.real(eivec.conjugate() * eivec)  # Not a matrix multiplication!
		eivec2a = eivec2.reshape(nz, norb, order = 'C')
		psi2 = np.sum(eivec2a, axis = 1) / dz

		wfdata.append(psi2)
		bandlabel = ddp1.char[j] if ddp1.char is not None and '?' not in ddp1.char[j] else ddp1.bindex[j] if ddp1.bindex is not None else ("(%i)" % j)
		heading.append("%s (%.1f meV)" % (bandlabel, energy))
		subheading.append("|\u03c8|\u00b2")  # |psi|^2

	wfdata = np.array(wfdata)
	q_z(filename, params, wfdata, clabel = heading, units = subheading, precision = precision)
	return
+
+
def abs_wavefunctions_y(params, diagdatapoint, filename = "", absval = False, title = None, eivalrange = None, bandlabels = None, overlap_eivec = None, precision = None):
	"""Table of wave functions |psi(y)|^2, wrapper version.
	Each column represents a wave function, i.e., its probability density at y.
	This function also saves additional files per eigenstate with the wave
	functions split by orbital (and optionally, by subband).

	Arguments:
	params         PhysParams instance
	diagdatapoint  DiagDataPoint instance.
	filename       String. The output file name for the file where all total
	               probability densities are saved. The same string is also used
	               for generating the per-state data file.
	absval         NOT USED
	title          NOT USED
	eivalrange     None or a 2-tuple. If set, do not plot wave functions for the
	               states whose eigenvalues lie outside this range.
	bandlabels     NOT USED
	overlap_eivec  A dict instance. The keys are the subband labels, the values
	               are arrays representing the eigenvector. If given, include
	               the decomposition into subbands in the per-state data file.
	precision      Integer or None. Number of digits for floating point numbers.
	               If None, use configuration value.

	No return value.
	"""
	if precision is None:
		precision = get_config_int('table_wf_precision', minval = 2)
	wf_format = get_config('table_wf_files', choices = ['none', 'csv', 'tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip'])
	fname, fext = os.path.splitext(filename)
	all_files = []
	if wf_format == 'none':  # skip writing csv files
		return
	if isinstance(eivalrange, (list, tuple)) and len(eivalrange) == 2:
		emin, emax = min(eivalrange), max(eivalrange)
	else:
		emin, emax = -np.inf, np.inf

	nz = params.nz
	ny = params.ny
	dy = params.yres
	norb = params.norbitals
	ddp1 = diagdatapoint.sort_by_eival()

	# Infer the dimensionality from the eigenvector length.
	if ddp1.neig != ddp1.eivec.shape[1]:
		raise ValueError(f"Invalid shape for ddp.eivec. It does not match ddp.neig = {ddp1.neig}.")
	if ddp1.eivec.shape[0] == norb * ny * nz:  # for 1D
		dim = 1
	elif ddp1.eivec.shape[0] == norb * nz:  # for 2D
		dim = 2
		ny = 1
	else:
		raise ValueError("Eigenvectors have incorrect number of components.")

	if ny <= 1 or dim == 2:
		sys.stderr.write("Warning (tableo.abs_wavefunctions_y): No y dimension.\n")
		return

	# Build one per-state file name from the (rounded) state energy. States
	# outside the eigenvalue range get an empty placeholder (skipped below).
	filenames = []
	for j in range(0, ddp1.neig):
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			filenames.append("")
			continue
		energy_int = int(round(energy))
		filenames.append(f"{fname}.{energy_int:+d}meV")
	filenames = [f"{fn}{fext}" for fn in get_unique_filenames(filenames)]

	wf_alldata = []
	wf_energies = []
	y = params.yvalues_nm()
	for j in range(0, ddp1.neig):
		eivec = ddp1.eivec[:, j]
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			continue

		eivec = np.reshape(eivec, (ny, nz, norb))
		thisdata = [y]
		columns = ['y', 'sum']

		# Full wave function (for a separate file)
		wf_energies.append(energy)
		psi2_sum = np.sum(np.abs(eivec)**2, axis = (1, 2)) / dy
		wf_alldata.append(psi2_sum)
		thisdata.append(psi2_sum)

		# Orbital overlap
		columns += orbital_labels(style = 'unicode', norb = norb)
		for b in range(0, norb):
			psi = eivec[:, :, b]
			psi2 = np.sum(np.abs(psi)**2, axis = 1)
			thisdata.append(psi2 / dy)

		if overlap_eivec is not None:  # Subband overlap
			eivec = np.reshape(eivec, (ny, nz * norb))
			absv2 = np.sum(np.abs(eivec)**2)
			total_ei = np.sum(np.abs(eivec)**2, axis=1) / absv2
			total_ov = np.zeros_like(total_ei)
			for ov in overlap_eivec:      # overlap_eivec should be a dict
				ovec = overlap_eivec[ov]  # this is the data; argument ov is the label
				absw2 = np.sum(np.abs(ovec)**2)
				psi = np.inner(eivec.conjugate(), ovec)
				# print ('%i (%s):' % (jj+1, ov), eivec.shape, ovec.shape, '->', psi.shape, '->')
				psi2 = np.abs(psi)**2 / absv2 / absw2
				total_ov += psi2
				thisdata.append(psi2 / dy)
				columns.append(ov)
			# Remainder not covered by any of the given subbands.
			other_ov = total_ei - total_ov
			thisdata.append(other_ov / dy)
			columns.append('other')

		subheading = ['nm'] + ["|\u03c8|\u00b2" for c in columns[1:]]  # |psi|^2
		simple(filenames[j], thisdata, float_precision = precision, clabel = columns, cunit = subheading)
		all_files.append(filenames[j])

	if len(wf_alldata) == 0:
		# Correct function name in the warning (was 'tableo.wavefunction_y').
		sys.stderr.write("Warning (tableo.abs_wavefunctions_y): No output files have been written.\n")
		return

	# Summary file: all total probability densities side by side.
	alldata = np.concatenate(([y], np.array(wf_alldata)))
	heading = ['y'] + ["%.2f meV" % e for e in wf_energies]
	subheading = ['nm'] + ["|\u03c8|\u00b2" for e in wf_energies]  # |psi|^2
	simple(filename, alldata, float_precision = precision, clabel = heading, cunit = subheading)
	all_files.append(filename)

	if wf_format in ['tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip']:
		archive_file = fname + ("--csv.zip" if 'zip' in wf_format else "--csv.tar.gz" if 'gz' in wf_format else "--csv.tar")
		create_archive(archive_file, all_files, fmt = wf_format)
	return
+
+
def wavefunction_zy(params, diagdatapoint, filename = "", absval = True, separate_bands = False, title = None, eivalrange = None, precision = None):
	"""Table of wave functions |psi(z, y)|^2, wrapper version.
	For each eigenstate, compose a two-dimensional table with the y coordinates
	in the columns and z coordinates in the rows.

	Arguments:
	params         PhysParams instance
	diagdatapoint  DiagDataPoint instance.
	filename       String. The output file name for the file where all total
	               probability densities are saved. The same string is also used
	               for generating the per-state data file.
	absval         NOT USED
	separate_bands  If False, sum absolute value squared over the orbitals.
	                If True, provide data for each orbital separately.
	title          NOT USED
	eivalrange     None or a 2-tuple. If set, do not plot wave functions for the
	               states whose eigenvalues lie outside this range.
	precision      Integer or None. Number of digits for floating point numbers.
	               If None, use configuration value.

	No return value.
	"""
	if precision is None:
		precision = get_config_int('table_wf_precision', minval = 2)
	# 'none' disables output entirely; archive formats gather the csv files
	# into a tar/zip file at the end.
	wf_format = get_config('table_wf_files', choices = ['none', 'csv', 'tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip'])
	fname, fext = os.path.splitext(filename)
	all_files = []
	if wf_format == 'none':  # skip writing csv files
		return
	if isinstance(eivalrange, (list, tuple)) and len(eivalrange) == 2:
		emin, emax = min(eivalrange), max(eivalrange)
	else:
		emin, emax = -np.inf, np.inf

	nz = params.nz
	ny = params.ny
	dz = params.zres
	dy = params.yres
	z = params.zvalues_nm()
	y = params.yvalues_nm()
	norb = params.norbitals
	labels = {'axislabels': ['z', 'y'],	'axisunits': ['nm', 'nm'], 'datalabel': '|psi|^2', 'dataunit': 'nm^-2'}

	# Infer the dimensionality from the eigenvector length.
	ddp1 = diagdatapoint.sort_by_eival()
	if ddp1.neig != ddp1.eivec.shape[1]:
		raise ValueError(f"Invalid shape for ddp.eivec. It does not match ddp.neig = {ddp1.neig}.")
	if ddp1.eivec.shape[0] == norb * ny * nz:  # for 1D
		dim = 1
	elif ddp1.eivec.shape[0] == norb * nz:  # for 2D
		dim = 2
		ny = 1
	else:
		raise ValueError("Eigenvectors have incorrect number of components.")

	if ny <= 1 or dim == 2:
		sys.stderr.write("Warning (tableo.wavefunction_zy): No y dimension.\n")
		return

	# Build one per-state file name from the (rounded) state energy. States
	# outside the eigenvalue range get an empty placeholder (skipped below).
	filenames = []
	for j in range(0, ddp1.neig):
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			filenames.append("")
			continue
		energy_int = int(round(energy))
		filenames.append(f"{fname}.{energy_int:+d}meV")
	filenames = [f"{fn}{fext}" for fn in get_unique_filenames(filenames)]

	# NOTE(review): wf_energies is filled below but not read afterwards in
	# this function — confirm whether it is still needed.
	wf_energies = []
	for j in range(0, ddp1.neig):
		energy = ddp1.eival[j]
		if energy < emin or energy > emax:
			continue
		wf_energies.append(energy)

		# Full wave function
		eivec = np.reshape(ddp1.eivec[:, j], (ny, nz, norb))
		if separate_bands:
			# One (y, z) probability-density table per orbital, stacked
			# vertically: rows are grouped orbital by orbital.
			eivecdata = np.abs(eivec)**2 / dy / dz
			eivecdata = eivecdata.transpose(2, 0, 1).reshape(ny * norb, nz)
			oval = np.repeat(np.arange(0, norb), ny)  # TODO: For future use.
			yval = np.tile(y, norb)
		else:
			# Single table: probability density summed over orbitals.
			eivecdata = np.sum(np.abs(eivec)**2, axis = 2).T / dy / dz
			yval = y

		clabel = "%.3f meV" % energy
		# TODO: For separate_bands = True, the data now appears as norb tables
		# in succession, with the orbital DOF unlabelled. As of now, simple2d
		# does not support multi-indexing for row and column headers. An
		# alternative solution would be to put each orbital on a different
		# worksheet (infrastructure is also not available).
		simple2d(
			filenames[j], z, yval, eivecdata, float_precision = precision,
			clabel = clabel, **labels)
		all_files.append(filenames[j])

	if len(all_files) == 0:
		sys.stderr.write("Warning (tableo.wavefunction_zy): No output files have been written.\n")
	elif wf_format in ['tar', 'gz', 'gzip', 'targz', 'tar.gz', 'zip', 'zipnozip']:
		# Gather the per-state csv files into a single archive if requested.
		archive_file = fname + ("--csv.zip" if 'zip' in wf_format else "--csv.tar.gz" if 'gz' in wf_format else "--csv.tar")
		create_archive(archive_file, all_files, fmt = wf_format)
	return
diff --git a/kdotpy-v1.0.0/src/kdotpy/tableo/write.py b/kdotpy-v1.0.0/src/kdotpy/tableo/write.py
new file mode 100644
index 0000000000000000000000000000000000000000..a97334f6f71f4daff7e23554450f9a668f1822eb
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tableo/write.py
@@ -0,0 +1,322 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import csv
+
+from collections import Counter
+
+from .tools import get_column_widths, format_row, get_csv_style, get_bandlabel_position
+from .postwrite import write_axislabels, write_extraheader
+
+try:
+	import pandas as pd  # noqa: F401 # Ignore import but unused.
+	pd_ver = pd.__version__
+except:
+	HAS_PD = False
+else:
+	HAS_PD = (pd_ver >= '1.0.0')
+
+### HELPER FUNCTIONS ###
+
+def parse_index_csv(index=None):
+	"""Parse index into an empty or a two-dimensional list"""
+	if isinstance(index, (list, np.ndarray)):
+		if all(isinstance(x, (list, np.ndarray)) for x in index):
+			pass
+		elif not any(isinstance(x, (list, np.ndarray)) for x in index):
+			index = [index]
+		else:
+			raise TypeError("Non-uniform types of index array")
+	elif index is None:
+		index = []
+	else:
+		raise TypeError("Invalid type for argument index")
+	return index
+
+def parse_label_text_csv(columns, index, label_text=None):
+	"""Insert label text into list of column labels (top left of table)"""
+	columns = [""] * len(index) + columns
+	if label_text is None or len(index) == 0:
+		return columns
+	if isinstance(label_text, str):
+		columns[0] = label_text
+	elif isinstance(label_text, (list, np.ndarray)):
+		l = min(len(label_text), len(index))
+		columns[:l] = label_text[:l]
+	else:
+		raise TypeError("Invalid type for argument index_label")
+	return columns
+
+def extract_float_format(formats, default='%g'):
+	"""Extract the most common element in a list of format string that can be used for float values"""
+	float_formats = [fmt for fmt in formats if fmt.startswith('%') and fmt[-1] in ['e', 'f', 'g']]
+	if len(float_formats) == 0:
+		return default
+	else:
+		return Counter(float_formats).most_common(1)[0][0]  # using collections.Counter()
+
+def get_index_ncol(index):
+	"""Get number of columns associated to index (pandas)"""
+	if index is None:
+		return 0
+	elif HAS_PD and isinstance(index, pd.MultiIndex):
+		return index.nlevels
+	elif isinstance(index, (list, np.ndarray)):
+		if all(isinstance(x, (list, np.ndarray)) for x in index):
+			return len(index)
+		elif not any(isinstance(x, (list, np.ndarray)) for x in index):
+			return 1
+		else:
+			raise TypeError("Non-uniform types of index array")
+	else:
+		raise TypeError("Invalid type for argument index")
+
+def parse_index_pandas(index=None):
+	"""Parse index into an empty or a two-dimensional list"""
+	if isinstance(index, (list, np.ndarray)):
+		if all(isinstance(x, (list, np.ndarray)) for x in index):
+			index = pd.MultiIndex.from_arrays(index)
+		elif not any(isinstance(x, (list, np.ndarray)) for x in index):
+			pass
+		else:
+			raise TypeError("Non-uniform types of index array")
+	elif index is None:
+		pass
+	else:
+		raise TypeError("Invalid type for argument index")
+	return index
+
+def parse_label_text_pd(index, label_text=None):
+	"""Insert label text into list of column labels (top left of table)"""
+	l = get_index_ncol(index)
+	if l == 0:
+		return None
+	label_text_out = [""] * l
+	if isinstance(label_text, str):
+		label_text_out[0] = label_text
+	elif isinstance(label_text, (list, np.ndarray)):
+		l = min(len(label_text), l)
+		label_text_out[:l] = label_text[:l]
+	else:
+		raise TypeError("Invalid type for argument index_label")
+	return label_text_out
+
+def fmtnan(fmt, x, nanstr=''):
+	"""Apply fmt % x or return empty string if the value is a floating-point NaN
+
+	We use fmt % x rather than fmt.format(x) because the former is significantly
+	(about 30%) faster.
+	"""
+	return (fmt % x).replace('nan', nanstr) if fmt[-1] in 'efg' else (fmt % x)
+
+### WRITERS ###
+
+def csvwrite(filename, data, formats, columns=None, units=None, index=None, label_text=None, sep=','):
+	"""Basic writer for column based data to a csv (comma separated values) file
+
+	Arguments:
+	filename    String. The output file name.
+	data        Dict, list, or array. The column data.
+	formats     List of strings. Format specifiers fmt that can be applied to
+	            values x as fmt % x.
+	columns     List of strings. The column headers.
+	units       List of strings or None. The headers for the second row (units).
+	            If None, the unit row is not written.
+	index       List of strings. The row headers. This may be a list of lists
+	            for a multi-column row header.
+	label_text  String or list of strings. These are inserted in the top left
+	            table entry, if there is space (i.e., if index is set).
+	sep         String of length 1. The column separator.
+	"""
+	if isinstance(data, dict):
+		data = list(data.values())
+
+	index = parse_index_csv(index=index)
+	columns = parse_label_text_csv(columns, index, label_text=label_text)
+	formats = ["%s"] * len(index) + formats
+	if units is not None:
+		units = [""] * len(index) + units
+
+	with open(filename, 'w', encoding='utf-8', newline='') as f:
+		writer = csv.writer(f, delimiter=sep)
+		writer.writerow(columns)
+		if units is not None:
+			writer.writerow(units)
+		for row in zip(*index, *data):
+			writer.writerow([fmtnan(fmt, x) for fmt, x in zip(formats, row)])
+
+def alignwrite(filename, data, formats, columns=None, units=None, index=None, label_text=None, sep=' '):
+	"""Basic writer for column based data to a column-aligned text file
+
+	Arguments:
+	filename    String. The output file name.
+	data        Dict, list, or array. The column data.
+	formats     List of strings. Format specifiers fmt that can be applied to
+	            values x as fmt % x.
+	columns     List of strings. The column headers.
+	units       List of strings or None. The headers for the second row (units).
+	            If None, the unit row is not written.
+	index       List of strings. The row headers. This may be a list of lists
+	            for a multi-column row header.
+	label_text  String or list of strings. These are inserted in the top left
+	            table entry, if there is space (i.e., if index is set).
+	sep         String. The column separator. Unlike for csvwrite(), the string
+	            sep may be of length > 1.
+	"""
+	if isinstance(data, dict):
+		data = list(data.values())  # keep the column values; keys are ignored
+
+	# Normalize index and prepend index columns to headers and formats
+	index = parse_index_csv(index=index)
+	columns = parse_label_text_csv(columns, index, label_text=label_text)
+	formats = ["%s"] * len(index) + formats
+	if units is not None:
+		units = [""] * len(index) + units
+	# Widths for all columns; presumably index columns come first, matching
+	# the zip order below — TODO confirm against get_column_widths()
+	widths = get_column_widths(data, formats, columns, units, index=index)
+
+	with open(filename, 'w', encoding='utf-8') as f:
+		f.write(format_row(columns, sep, widths=widths) + "\n")
+		if units is not None:
+			f.write(format_row(units, sep, widths=widths) + "\n")
+		for row in zip(*index, *data):
+			# Format each value, blank out NaN, and right-justify each cell
+			# to its column width
+			f.write(sep.join([fmtnan(fmt, x).rjust(w) for fmt, x, w in zip(formats, row, widths)]) + '\n')
+
+def pdwrite(filename, data, formats, columns=None, units=None, index=None, label_text=None, sep=','):
+	"""Basic writer for column based data using a pandas DataFrame
+
+	Arguments:
+	filename    String. The output file name.
+	data        List or array of column data.
+	formats     List of strings. Format specifiers fmt that can be applied to
+	            values x as fmt % x. Note that pandas supports only a single
+	            float_format; thus, the float_format is extracted from this list
+	            by majority.
+	columns     List of strings. The column headers.
+	units       List of strings or None. The headers for the second row (units).
+	            If None, the unit row is not written.
+	index       List of strings. The row headers. This may be a list of lists
+	            for a multi-column row header.
+	label_text  String or list of strings. These are inserted in the top left
+	            table entry, if there is space (i.e., if index is set).
+	sep         String of length 1. The column separator.
+	"""
+	index = parse_index_pandas(index)
+	# Write row headers only if an index was actually given
+	do_index = index is not None and len(index) > 0
+	# Top-left cell labels; handling is delegated to parse_label_text_pd()
+	label_text = parse_label_text_pd(index, label_text=label_text)
+	# pandas to_csv() accepts one float_format only; use the majority format
+	float_format = extract_float_format(formats)
+
+	if isinstance(data, dict):
+		dataframe = pd.DataFrame(data, index = index)
+	elif isinstance(data, (list, np.ndarray)):
+		# data is a list of columns; zip(*data) yields the rows
+		dataframe = pd.DataFrame(zip(*data), index = index)
+	else:
+		raise TypeError("Invalid type for data")
+
+	if units is not None:
+		# Two header rows (column names and units) via a column MultiIndex
+		dataframe.columns = pd.MultiIndex.from_arrays((columns, units))
+	else:
+		dataframe.columns = columns
+
+	with open(filename, 'w', encoding='utf-8') as f:
+		dataframe.to_csv(f, float_format=float_format, index=do_index, index_label=label_text, sep=sep)
+
+def write(
+		filename, data, formats, columns=None, units=None, index=None,
+		label_text=None, csvstyle=None, axislabels=None, axisunits=None,
+		datalabel=None, dataunit=None, extraheader=None):
+	"""Write table. Wrapper function that selects the specific write function that does the actual job.
+
+	Arguments:
+	filename     String. The output file name.
+	data         Dict, list, or array. The column data.
+	formats      List of strings. Format specifiers fmt that can be applied to
+	             values x as fmt % x.
+	columns      List of strings. The column headers.
+	units        List of strings or None. The headers for the second row
+	             (units). If None, the unit row is not written.
+	index        List of strings. The row headers. This may be a list of lists
+	             for a multi-column row header.
+	label_text   String or list of strings. These are inserted in the top left
+	             table entry, if there is space (i.e., if index is set).
+	csvstyle     String or None. Used to select the specific writer function. If
+	             None (default), get CSV style from configuration value
+	             csv_style.
+	axislabels   List of strings or None. The labels of the x and y axes. These
+	             are written at the right end of the first row and the bottom
+	             end of the first column, respectively. If None, do not write
+	             these labels.
+	axisunits    List of strings or None. Units associated to the x and y axes.
+	             These are written at the right end of the first row and the
+	             bottom end of the first column (i.e., right of and below the
+	             axes labels), respectively. If None, do not write the units.
+	datalabel    String or None. Label that is written at the right end of the
+	             second row. This should typically be the quantity that the data
+	             represents. If None, do not write a label.
+	dataunit     String or None. Unit associated to the data. This is printed on
+	             the row directly after the data label.
+	extraheader  List of strings, list of lists of strings, or None. Extra rows
+	             that are inserted into the file afterwards. These are used for
+	             band labels, for example. If a nested list, write a multiple
+	             rows. If a flat list, write a single row. If None, do not
+	             insert anything. The position of insertion is determined by the
+	             configuration value csv_bandlabel_position.
+	"""
+	if csvstyle is None:
+		csvstyle = get_csv_style()
+
+	if csvstyle == 'csvinternal' or (csvstyle == 'csv' and not HAS_PD):
+		csvwrite(filename, data, formats, columns=columns, units=units, index=index, label_text=label_text, sep=',')
+		sep = ','
+		widths = None
+	elif csvstyle == 'csvpandas' or (csvstyle == 'csv' and HAS_PD):
+		pdwrite(filename, data, formats, columns=columns, units=units, index=index, label_text=label_text, sep=',')
+		sep = ','
+		widths = None
+	elif csvstyle == 'align':
+		alignwrite(filename, data, formats, columns=columns, units=units, index=index, label_text=label_text, sep=' ')
+		sep = ' '
+		widths = get_column_widths(data, formats, columns, units, index=parse_index_csv(index=index))
+	else:
+		raise ValueError("Invalid value for configuration parameter csv_style")
+
+	if axislabels or axisunits or datalabel or dataunit:
+		write_axislabels(filename, axislabels=axislabels, axisunits=axisunits, datalabel=datalabel, dataunit=dataunit, sep=sep, widths=widths)
+	if extraheader:
+		write_extraheader(filename, extraheader, row=get_bandlabel_position(), sep=sep, widths=widths)
diff --git a/kdotpy-v1.0.0/src/kdotpy/tasks.py b/kdotpy-v1.0.0/src/kdotpy/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..2195ce67b4f7bfb8aadce40f71c14a0eceeffc0b
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/tasks.py
@@ -0,0 +1,303 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+from time import sleep, perf_counter, time as rtime
+from multiprocessing.pool import Pool as ProcessPool, ThreadPool
+from queue import PriorityQueue
+from platform import system
+import signal
+
+from .config import get_config_num
+from .parallel import show_job_monitor, TerminateSignal, signalstr, init_worker
+from . import cmdargs
+
+
+class TaskWrapper:
+    """Wraps around any tasks work load function, taking care of any arguments.
+    This object is can be pickled and sent to other processes."""
+    def __init__(self, func, *args, **kwds):
+        self.func = func
+        self.args = args
+        self.kwds = kwds
+
+    def run(self):
+        """Run the task (function with arguments). Typically executed
+        within the scope of a worker thread/process."""
+        return self.func(*self.args, **self.kwds)
+
+
+class Task:
+    """Class that holds function handles and parallelization settings (from Model class)
+     and parameters (from DiagDataPoint).
+     Takes care of status information output and error handling."""
+    def __init__(self, queue, name=None, worker_func=None, callback_func=None, worker_type=None,
+                 n_proc=1, n_threads=1, gpu_workers=0, priority=0):
+        """Create and enqueue a new Task.
+
+        Arguments:
+        queue          TaskManager instance. The task enqueues itself on this
+                       queue at the end of __init__.
+        name           String or None. Human readable label. If None, a name
+                       is generated from worker_func.__name__ and a
+                       perf_counter() based integer.
+        worker_func    Callable invoked without arguments that does the
+                       actual work and returns a single value.
+        callback_func  Callable that receives the worker result; it is
+                       wrapped (see wrap_callback()) so the queue is notified
+                       when the task finishes.
+        worker_type    'process', 'thread', or None. Selects the pool on
+                       which run() executes; None means sequential execution.
+        n_proc         Integer. Number of processes this task occupies.
+        n_threads      Integer. Number of threads per process.
+        gpu_workers    Integer. Number of GPU workers this task occupies.
+        priority       Numeric or tuple. Lower values are scheduled first.
+        """
+        if not isinstance(queue, TaskManager):
+            raise TypeError('Task creation: Arg queue must specify a queue of class TaskManager')
+        self.queue = queue
+        # NOTE(review): if worker_func is None and name is None, the name
+        # generation below raises AttributeError before the nicer TypeError
+        # check is reached.
+        self.name = name if name is not None else worker_func.__name__ + '_%d' % (perf_counter() * 1e9)
+        if not callable(worker_func):
+            raise TypeError('Task: Worker function must be a callable function handle.')
+        self.worker_func = worker_func  # must return a single value
+        self.callback = self.wrap_callback(callback_func)  # must return a new Task or None and save the result from worker_func into an object
+        self.worker_type = worker_type
+        self.n_proc = n_proc
+        self.n_threads = n_threads
+        self.gpu_workers = gpu_workers
+        self.priority = priority
+        self.start_time = None  # set by run(); None means not started yet
+        self.error_counter = 0  # number of failed attempts so far
+        self.retries = get_config_num('task_retries')  # max number of retries (from configuration)
+        queue.put(self)  # enqueue immediately upon construction
+
+    def __str__(self):
+        """Return a human readable label for this task, with priority and,
+        if started, the start time (epoch seconds from time.time())."""
+        if self.start_time is None:
+            return 'Task: %s (%d)' % (self.name, self.priority)
+        else:
+            return 'Task: %s (%d), started: %f' % (self.name, self.priority, self.start_time)
+
+    def __lt__(self, other):
+        """Definition of priority ordering.
+        In case priority (numeric or tuple) is equal,
+        do tasks that use fewer threads first."""
+        if self.priority == other.priority:
+            return self.threadsum < other.threadsum
+        return self.priority < other.priority
+
+    def __eq__(self, other):
+        # Tasks compare equal when both priority and total thread count
+        # match; kept consistent with the __lt__ ordering above.
+        return self.priority == other.priority and self.threadsum == other.threadsum
+
+    @property
+    def threadsum(self):
+        """Total threads used by this task (processes times threads per process)."""
+        return self.n_threads * self.n_proc
+
+    def run(self):
+        """Run this task on a suitable parallel worker pool or sequentially in the main process.
+        Optional status output.
+
+        Returns:
+        Integer. The number of threads considered occupied by this task.
+        """
+        self.start_time = rtime()
+        p_pool = self.queue.p_pool
+        t_pool = self.queue.t_pool
+        show_job_monitor("Entering %s..." % self.name)
+        # apply_async() calls worker_func without arguments; the result goes
+        # to the wrapped callback, exceptions to error_callback.
+        if self.worker_type == 'process' and isinstance(p_pool, ProcessPool):
+            p_pool.apply_async(self.worker_func, callback=self.callback, error_callback=self.error_callback)
+            return self.threadsum
+        elif self.worker_type == 'thread' and isinstance(t_pool, ThreadPool):
+            t_pool.apply_async(self.worker_func, callback=self.callback, error_callback=self.error_callback)
+            return self.n_threads
+        else:
+            # work on this task in sequential mode
+            self.callback(self.worker_func())
+            return self.threadsum
+
+    def wrap_callback(self, org_cb):
+        """Modify callback function handle."""
+        def extended_callback(*args, **kwds):
+            """Add task finish console output and notify queue about finished task."""
+            retval = org_cb(*args, **kwds)  # execute original callback
+            show_job_monitor("Finished %s (%.3f s)" % (self.name, rtime() - self.start_time))
+            self.queue.done(self)
+            return retval  # return original callback result
+        return extended_callback
+
+    def error_callback(self, exception):
+        """Alternative callback to catch errors during task execution.
+        Notify about failed tasks and retry for configured number of tries.
+        Finally, skip task."""
+        self.error_counter += 1
+        if self.error_counter > self.retries:
+            show_job_monitor(
+                "EXCEPTION in %s! (%.3f s)[%s] Skipped after %d tries." %
+                (self.name, rtime() - self.start_time, exception, self.error_counter)
+            )
+            self.queue.done(self, skipped=True)
+        else:
+            show_job_monitor("EXCEPTION in %s! (%.3f s)[%s] Restarting..." % (self.name, rtime() - self.start_time, exception))
+            # NOTE(review): the retry resets start_time and runs from within
+            # the pool's callback machinery — confirm this is intended.
+            self.run()
+
+
+class TaskManager(PriorityQueue):
+    """Extended Queue class to schedule Task objects and handle worker pools.
+
+    Only use this class in a main process/thread, as it creates worker pools,
+    which is not allowed from within an existing worker pool.
+
+    Redefinition of the base class changes scheduling
+    behaviour between LIFO, FIFO and Priority.
+    """
+    def __init__(self, max_total_threads = None, handle_sigchld = True):
+        """Create extended Queue class to schedule Task objects and handle worker pools.
+
+        See documentation for TaskManager class for more information.
+
+        Arguments:
+        max_total_threads  Integer. If None, derive worker, GPU, and thread
+                           limits from the command line arguments; otherwise
+                           use this value for all three limits.
+        handle_sigchld     True (default) or False. The value tells the
+                           TaskManager to redefine SIGCHLD to terminate on that
+                           signal. This is needed to make multiprocessing handle
+                           the case that a child process dies in a graceful way.
+                           However, this can interfere with some (external)
+                           solvers, like jax. For those solvers, handle_sigchld
+                           should be set to False. The value does not affect
+                           behaviour on Windows.
+        """
+        super().__init__()
+        if max_total_threads is None:
+            # Derive resource limits from command line arguments
+            n_threads = cmdargs.threads()
+            n_cpus, max_cpus = cmdargs.cpus()
+            n_gpus = cmdargs.gpu_workers()
+            self.max_workers = n_cpus
+            self.max_gpu_workers = n_gpus if n_gpus is not None else n_cpus
+            self.max_total_threads = n_cpus * (n_threads if n_threads is not None else 1)
+        else:
+            # Single explicit limit applied to workers, threads, and GPUs
+            self.max_workers = max_total_threads
+            self.max_total_threads = max_total_threads
+            self.max_gpu_workers = max_total_threads
+        # Resource accounting; updated in do_all() and done()
+        self.running_threads = 0
+        self.running_workers = 0
+        self.running_gpu_workers = 0
+        self.skip_counter = 0  # number of tasks skipped after repeated errors
+        self.p_pool, self.t_pool = None, None  # pools created in __enter__()
+        self.handle_sigchld = handle_sigchld
+
+    def sig_handler(self, s, fr):
+        """Convert an incoming signal into a TerminateSignal exception."""
+        raise TerminateSignal(s)
+
+    def __enter__(self):
+        """Start up pools when entering task manager context."""
+        # Select the signals to intercept; SIGUSR1/2 and SIGCHLD do not
+        # exist on Windows, SIGCHLD only if handle_sigchld is set.
+        if system() == 'Windows':
+            self.siglist = [signal.SIGTERM, signal.SIGABRT]
+        elif self.handle_sigchld:
+            self.siglist = [signal.SIGTERM, signal.SIGABRT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGCHLD]
+        else:
+            self.siglist = [signal.SIGTERM, signal.SIGABRT, signal.SIGUSR1, signal.SIGUSR2]
+        for s in self.siglist:
+            signal.signal(s, self.sig_handler)
+        if self.max_workers > 1:
+            show_job_monitor("Starting worker pools...")
+            self.p_pool = ProcessPool(self.max_workers, initializer = init_worker)
+            self.t_pool = ThreadPool(self.max_workers)
+        self.start_time = rtime()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Close worker pools when leaving task manager context.
+        Print run time information and skipped jobs."""
+        show_job_monitor("Total compute time %d s." % (rtime() - self.start_time))
+        if self.skip_counter > 0:
+            show_job_monitor("Skipped %d tasks." % self.skip_counter)
+        # Restore default handlers for all intercepted signals
+        for s in self.siglist:
+            signal.signal(s, signal.SIG_DFL)
+        if system() != 'Windows':
+            # Reset SIGCHLD unconditionally on POSIX; this is a no-op if it
+            # was not redefined (handle_sigchld False)
+            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+        if self.max_workers > 1:
+            if exc_type is None:  # Pool exits normally
+                self.p_pool.close()
+                self.t_pool.close()
+            else:  # Covers also: isinstance(exc_val, (KeyboardInterrupt, Exception))
+                sys.stderr.write("TaskManager: %s\nTerminating pools...\n" % exc_type.__name__)
+                self.p_pool.terminate()
+                self.t_pool.terminate()
+            self.p_pool.join()
+            self.t_pool.join()
+
+            # Translate termination signals and Ctrl-C into conventional
+            # 128+signum exit codes
+            if isinstance(exc_val, TerminateSignal):
+                sys.stderr.write("EXIT %i (%s)\n" % (128 + exc_val.signum, signalstr(exc_val.signum)))
+                exit(128 + exc_val.signum)
+            elif isinstance(exc_val, KeyboardInterrupt):
+                sys.stderr.write("EXIT %i (%s)\n" % (128 + signal.SIGINT, signalstr(signal.SIGINT)))
+                exit(128 + signal.SIGINT)
+            # In all other cases unhandled errors are propagated automatically
+
+    def done(self, task, skipped=False):
+        """Keep track of finished jobs and available resources."""
+        super().task_done()  # decrement the unfinished task counter
+        # Release the resources the task had claimed in do_all()
+        self.running_threads -= task.threadsum
+        self.running_workers -= task.n_proc
+        self.running_gpu_workers -= task.gpu_workers
+        if skipped:
+            self.skip_counter += 1
+
+    def do_all(self):
+        """Run all tasks in queue, blocking the calling thread until none are left.
+
+        This blocks the calling thread until there are no tasks left in the
+        queue. Tasks with process and thread worker strategy are executed
+        outside of the calling thread, but tasks without a parallel worker
+        strategy are executed in the calling thread."""
+        while self.unfinished_tasks:
+            if not self.empty() and self.running_workers < self.max_workers:
+                task = None
+                if self.running_gpu_workers >= self.max_gpu_workers:
+                    # GPU is fully in use, find CPU only tasks.
+                    gpu_task_list = []
+                    while not self.empty():
+                        task = self.get()
+                        if task.gpu_workers > 0:
+                            gpu_task_list.append(task)
+                            task = None
+                        else:
+                            break  # Task found, stop searching through Queue
+                    for gtask in gpu_task_list:
+                        self.put(gtask)  # requeue removed GPU tasks
+                        super().task_done()  # requeueing increases unfinished tasks counter.
+                else:
+                    task = self.get()
+
+                # Check free CPU and GPU resources:
+                if task is not None:
+                    if self.running_threads + task.threadsum <= self.max_total_threads \
+                            and self.running_gpu_workers + task.gpu_workers <= self.max_gpu_workers:
+                        task.run()  # run
+                        # Claim resources; released again in done()
+                        self.running_threads += task.threadsum
+                        self.running_workers += 1
+                        self.running_gpu_workers += task.gpu_workers
+                    else:
+                        self.put(task)  # this task was to big to fit right now, so requeue it
+                        super().task_done()  # as requeueing increases the unfinished task counter, reduce it
+            else:
+                # All workers busy. Nothing to be enqueued.
+                # Send this thread to sleep to yield cpu time.
+                # Do not sleep too long, as this would slow down queueing,
+                # especially with fast tasks and few workers.
+                sleep(0.005)
diff --git a/kdotpy-v1.0.0/src/kdotpy/testselfcon.py b/kdotpy-v1.0.0/src/kdotpy/testselfcon.py
new file mode 100644
index 0000000000000000000000000000000000000000..12cb76a16bad95d17812fa337a1efed98e6c8250
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/testselfcon.py
@@ -0,0 +1,145 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+
+if sys.version_info < (3, 9):
+    sys.stderr.write("ERROR: Python version 3.9 or higher is required.\n")
+    exit(1)
+import os.path
+import subprocess
+import shlex
+from platform import system
+from datetime import datetime
+
+
+def get_git_revision_short_hash() -> str:
+    return (
+        subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
+        .decode("ascii")
+        .strip()
+    )
+
+
+# -- GLOBALS
+scriptdir = os.path.dirname(os.path.realpath(__file__))  # directory containing this script (and the kdotpy entry scripts)
+outdir = f"selfcon-reference-calculations/commit-{get_git_revision_short_hash()}"  # per-commit output directory for reference data
+_python_cmd = "python" if system() == "Windows" else None  # interpreter prefix; only needed on Windows, where scripts are not directly executable
+
+
+def run(cmd, args):
+    """Run script 'cmd' (resolved relative to scriptdir) with argument list 'args'.
+
+    On Windows, the 'python' interpreter is prepended explicitly; elsewhere the
+    script is assumed to be directly executable. If launching fails with an
+    OSError, retry through the shell; on POSIX systems the argument list is
+    first joined into a single shlex-quoted command string.
+
+    Returns the subprocess.CompletedProcess of whichever attempt was executed.
+    """
+    args = [os.path.join(scriptdir, cmd)] + args
+    if _python_cmd is not None:
+        args = [_python_cmd] + args
+    try:
+        cp = subprocess.run(args)
+    except OSError:
+        # Fallback: run via the shell. On Windows, subprocess accepts the list
+        # as-is with shell=True; on POSIX it must be a single quoted string.
+        if system() != "Windows":
+            args = " ".join([shlex.quote(arg) for arg in args])
+        cp = subprocess.run(args, shell=True)
+    return cp
+
+
+def symmetric_stack_30nm_efield_top_dependency():
+    cmd1 = "kdotpy-batch.py"
+    cmd2 = "kdotpy-2d.py"
+    args1 = "@etop -20 20 / 40".split(" ")
+    args2 = (
+        "8o kx 0 0.5 / 0.005 msubst CdTe mlayer HgCdTe 68% HgTe HgCdTe 68% "
+        "llayer 10 30 10 zres 0.25 targetenergy 0 neig 30 erange -60 120 "
+        "split 0.01 legend char plotstyle auto "
+        "config table_wf_precision=8;selfcon_debug=true;selfcon_dynamic_time_step=true "
+        "noax selfcon 100 0.01 efield 0 @etop dos obs z "
+        f"outdir {outdir}/symmetric_stack_30nm_efield_top_dependency "
+        "out .30nm_well.efield_0_@etop".split(" ")
+    )
+    args = args1 + ["do", "python", os.path.join(scriptdir, cmd2)] + args2
+    run(cmd1, args)
+
+
+def symmetric_stack_45nm_efield_top_dependency():
+    cmd1 = "kdotpy-batch.py"
+    cmd2 = "kdotpy-2d.py"
+    args1 = "@etop -20 20 / 40".split(" ")
+    args2 = (
+        "8o kx 0 0.65 / 0.005 msubst CdTe mlayer HgCdTe 68% HgTe HgCdTe 68% "
+        "llayer 10 45 10 zres 0.25 targetenergy 0 neig 40 erange -60 150 "
+        "split 0.01 legend char plotstyle auto "
+        "config table_wf_precision=8;selfcon_debug=true;selfcon_dynamic_time_step=true "
+        "noax selfcon 100 0.01 efield 0 @etop dos obs z "
+        f"outdir {outdir}/symmetric_stack_45nm_efield_top_dependency "
+        "out .45nm_well.efield_0_@etop".split(" ")
+    )
+    args = args1 + ["do", "python", os.path.join(scriptdir, cmd2)] + args2
+    run(cmd1, args)
+
+
+def symmetric_stack_70nm_efield_top_dependency():
+    cmd1 = "kdotpy-batch.py"
+    cmd2 = "kdotpy-2d.py"
+    args1 = "@etop 14 -14 / 14".split(" ")
+    args2 = (
+        "8o kx 0 0.65 / 0.005 msubst CdTe mlayer HgCdTe 68% HgTe HgCdTe 68% "
+        "llayer 10 70 10 zres 0.25 targetenergy -20 neig 100 erange -60 200 "
+        "split 0.01 legend char plotstyle auto "
+        "config table_wf_precision=8;selfcon_debug=true;selfcon_dynamic_time_step=true "
+        "noax selfcon 100 0.01 selfconweight 0.6 efield 0 @etop dos obs z "
+        f"outdir {outdir}/symmetric_stack_70nm_efield_top_dependency "
+        "out .70nm_well.efield_0_@etop".split(" ")
+    )
+    args = args1 + ["do", "python", os.path.join(scriptdir, cmd2)] + args2
+    run(cmd1, args)
+
+def main():
+    fcns = [
+        symmetric_stack_30nm_efield_top_dependency,
+        symmetric_stack_45nm_efield_top_dependency,
+        symmetric_stack_70nm_efield_top_dependency,
+    ]
+    for fcn in fcns:
+        print(f"Starting {fcn.__name__}.")
+        start = datetime.now()
+        fcn()
+        end = datetime.now()
+        print(f"Finished {fcn.__name__} after {end-start}.")
+
+if __name__ == '__main__':
+    main()
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/transitions.py b/kdotpy-v1.0.0/src/kdotpy/transitions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7f22f8990b0cd6481923fd01cc9bbff77bb7303
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/transitions.py
@@ -0,0 +1,977 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+from scipy.sparse import dia_matrix
+from .physconst import hbar, hbarm0, eoverhbar, eovereps0, cLight
+from .momentum import Vector
+from .hamiltonian import hz_sparse_ll_full
+from .lltools import delta_n_ll
+from .observables import blockdiag
+from .config import get_config_num
+from .density import BroadeningFunction
+
+### DATA STRUCTURE ###
+_refr_index_warning = False
+_trans_bval_warning = False
+class TransitionsData:
+	"""Container class for storing properties of optical transitions
+
+	Attributes:
+	n           Integer. Number of transitions.
+	energies    Array of shape (n, 2). Energies of the source and target states.
+	amplitudes  Array of shape (n,). The amplitudes are the absolute squares of
+				the velocity matrix elements (|v|^2). For the result of 'Fermi's
+	            golden rule', see method rate_density.
+	llindex     Array of shape (n, 2). LL indices of the two states.
+	bindex      None or array of shape (n, 2). Band indices of the two states.
+	occupancy   None or array of shape (n, 2). The occupancy of the two states.
+	            This is calculated by evaluating the occupation function
+	            (typically the Fermi function) at the two energies.
+	bval        If a single number or Vector, then the magnetic-field value that
+	            applies to all transitions. If an array of shape (n,), then the
+	            magnetic-field values that apply to the transitions
+	            individually.
+	refr_index  None or number. The refractive index of the system.
+
+	Note:
+	The arguments of the __init__() method have the same name and come in the
+	same order as listed above.
+	"""
+	def __init__(self, energies = None, amplitudes = None, llindex = None, bindex = None, occupancy = None, bval = None, refr_index = None):
+		# Normalize 'energies' to shape (n, 2): accept None, an (n, 2) or
+		# (2, n) array (the latter is transposed), or a 2-tuple of arrays.
+		if energies is None:
+			self.energies = np.zeros((0, 2))
+		elif isinstance(energies, np.ndarray) and energies.ndim == 2:
+			if energies.shape[0] == 2 and energies.shape[1] != 2:
+				self.energies = energies.T
+			elif energies.shape[1] == 2:
+				self.energies = energies
+			else:
+				raise ValueError("Argument 'energies' must be an array whose length is 2 in either the first or second dimension")
+		elif isinstance(energies, tuple) and len(energies) == 2:
+			self.energies = np.vstack(energies).T
+		else:
+			raise TypeError("Argument 'energies' must be an array or a 2-tuple")
+		# Number of transitions; every other per-transition array must match it.
+		self.n = self.energies.shape[0]
+
+		# 'amplitudes': one |v|^2 value per transition, shape (n,).
+		if amplitudes is None:
+			if self.n == 0:
+				self.amplitudes = np.zeros(0)
+			else:
+				raise ValueError("Argument 'amplitudes' must be an array of shape (%i,)" % self.n)
+		elif isinstance(amplitudes, np.ndarray):
+			if amplitudes.shape == (self.n,):
+				self.amplitudes = amplitudes
+			else:
+				raise ValueError("Argument 'amplitudes' must be an array of shape (%i,)" % self.n)
+		elif isinstance(amplitudes, list):
+			if len(amplitudes) == self.n:
+				self.amplitudes = np.array(amplitudes)
+			else:
+				raise ValueError("Argument 'amplitudes' must be an array-like instance of length %i" % self.n)
+		else:
+			raise TypeError("Argument 'amplitudes' must be an array-like instance")
+
+		# 'llindex': LL indices of the two states, stored as shape (n, 2);
+		# an input of shape (2, n) is transposed.
+		if llindex is None:
+			if self.n == 0:
+				self.llindex = np.zeros((0, 2))
+			else:
+				raise ValueError("Argument 'llindex' must be an array of shape (%i, %i)" % (2, self.n))
+		elif isinstance(llindex, np.ndarray):
+			if llindex.shape == (2, self.n):
+				self.llindex = llindex.T
+			elif llindex.shape == (self.n, 2):
+				self.llindex = llindex
+			else:
+				raise ValueError("Argument 'llindex' must be an array of shape (%i, %i)" % (2, self.n))
+		elif isinstance(llindex, tuple) and len(llindex) == 2:
+			if len(llindex[0]) == self.n or len(llindex[1]) == self.n:
+				self.llindex = np.vstack(llindex).T
+			else:
+				raise ValueError("Argument 'llindex' must be a two-tuple of array-like instances of length %i" % self.n)
+		else:
+			raise TypeError("Argument 'llindex' must be an array or a 2-tuple")
+
+		# 'bindex': optional band indices of the two states, shape (n, 2) or None.
+		if isinstance(bindex, np.ndarray):
+			if bindex.shape == (2, self.n):
+				self.bindex = bindex.T
+			elif bindex.shape == (self.n, 2):
+				self.bindex = bindex
+			else:
+				raise ValueError("Argument 'bindex' must be an array of shape (%i, %i)" % (2, self.n))
+		elif isinstance(bindex, tuple) and len(bindex) == 2:
+			if len(bindex[0]) == self.n or len(bindex[1]) == self.n:
+				self.bindex = np.vstack(bindex).T
+			else:
+				raise ValueError("Argument 'bindex' must be a two-tuple of array-like instances of length %i" % self.n)
+		elif bindex is None:
+			self.bindex = None
+		else:
+			raise TypeError("Argument 'bindex' must be an array or a 2-tuple")
+		# 'occupancy': optional occupation factors in [0, 1]; a scalar is
+		# broadcast to all n transitions.
+		if occupancy is None:
+			self.occupancy = None
+		elif isinstance(occupancy, (int, float, np.integer, np.floating)):
+			self.occupancy = np.full(self.n, occupancy)
+		elif isinstance(occupancy, np.ndarray):
+			if occupancy.shape != (self.n,):
+				raise ValueError("Argument 'occupancy' must be a one-dimensional array of length %i" % self.n)
+			self.occupancy = occupancy
+		else:
+			raise TypeError("Argument 'occupancy' must be an array, number, or None")
+		if self.occupancy is not None and (np.amin(self.occupancy) < 0.0 or np.amax(self.occupancy) > 1.0):
+			raise ValueError("Argument 'occupancy' must not contain values < 0 or > 1")
+		# 'bval': magnetic field; scalar/Vector applies to all transitions,
+		# an array of length n applies per transition (length-1 arrays unwrap).
+		if isinstance(bval, np.ndarray):
+			if bval.shape == (self.n,):
+				self.bval = bval
+			elif bval.shape == (1,):
+				self.bval = bval[0]
+			else:
+				raise ValueError("If argument 'bval' is an array, its length must be either 1 or n.")
+		elif isinstance(bval, (float, int, np.floating, np.integer)):
+			self.bval = float(bval)
+		elif isinstance(bval, Vector):
+			self.bval = bval
+		elif bval is None:
+			self.bval = None
+		else:
+			raise TypeError("Argument 'bval' must be an array of length 1 or n, a Vector, a number, or None.")
+		# 'refr_index': optional refractive index (stored as float or None).
+		if isinstance(refr_index, (float, int, np.floating, np.integer)):
+			self.refr_index = float(refr_index)
+		elif refr_index is None:
+			self.refr_index = None
+		else:
+			raise TypeError("Argument 'refr_index' must be numeric or None.")
+
+	def set_bindex(self, eival, llindex, bindex):
+		"""Set band indices
+
+		Arguments:
+		eival     Array of eigenvalues
+		llindex   Array of LL indices, or None to match by energy only
+		bindex    Array of band indices
+
+		Note:
+		The input arrays must be of the same length. Stored transition energies
+		are matched to 'eival' within an absolute tolerance of 1e-6; unmatched
+		states keep band index 0 and a warning is printed.
+		"""
+		if len(eival) != len(bindex):
+			raise ValueError("Arguments eival, llindex, and bindex must be of equal length")
+		self.bindex = np.zeros((self.n, 2), dtype = int)
+		if llindex is None:
+			# No LL information: match each stored energy against all eigenvalues.
+			ei2 = np.asarray(eival)
+			bi2 = np.asarray(bindex)
+			for j in [0, 1]:  # iterate over two rows in the data arrays
+				ei1 = self.energies[:, j]
+				# find index of matching energy
+				idx = np.argmin(np.abs(ei1[:, np.newaxis] - ei2[np.newaxis, :]), axis = 1)
+				if np.amin(np.abs(ei1 - ei2[idx])) >= 1e-6:
+					sys.stderr.write("Warning (TransitionsData.set_bindex): Non-matching energy eigenvalues\n")
+				self.bindex[:, j] = np.where(np.abs(ei1 - ei2[idx]) < 1e-6, bi2[idx], np.zeros_like(idx))
+		else:
+			if len(eival) != len(llindex):
+				raise ValueError("Arguments eival, llindex, and bindex must be of equal length")
+			ll_idx = np.unique(llindex)
+			for lln in ll_idx:  # iterate over ll indices
+				# constrain to ll index in input data
+				sel2 = np.asarray(llindex) == lln
+				ei2 = np.asarray(eival)[sel2]
+				bi2 = np.asarray(bindex)[sel2]
+				for j in [0, 1]:  # iterate over two rows in the data arrays
+					sel1 = self.llindex[:, j] == lln
+					ei1 = self.energies[:, j][sel1]
+					if np.count_nonzero(sel1) == 0:
+						continue
+					# find index of matching energy
+					idx = np.argmin(np.abs(ei1[:, np.newaxis] - ei2[np.newaxis, :]), axis = 1)
+					if np.amin(np.abs(ei1 - ei2[idx])) >= 1e-6:
+						sys.stderr.write("Warning (TransitionsData.set_bindex): Non-matching energy eigenvalues\n")
+					self.bindex[:, j][sel1] = np.where(np.abs(ei1 - ei2[idx]) < 1e-6, bi2[idx], np.zeros_like(idx))
+		# A zero entry means an unmatched state; 2n nonzero entries = full match.
+		if np.count_nonzero(self.bindex) != 2 * self.n:
+			sys.stderr.write("Warning (TransitionsData.set_bindex): Not all eigenvalues could be matched\n")
+
+	def set_refractive_index(self, refr_index):
+		"""Set refractive index."""
+		if isinstance(refr_index, (float, int, np.floating, np.integer)):
+			self.refr_index = float(refr_index)
+		elif refr_index is None:
+			self.refr_index = None
+		else:
+			raise TypeError("Argument 'refr_index' must be numeric or None.")
+
+	def canonical_order(self, in_place = False):
+		"""Put transitions into canonical order, i.e., with the lowest LL as the first state.
+
+		Argument:
+		in_place  If True, reorder present instance and return self. If False,
+		          return a new TransitionsData instance.
+		"""
+		# Per-transition permutation of the two states, sorted by LL index
+		ll_order = np.argsort(self.llindex, axis = 1)
+		# Row indices for advanced indexing: apply ll_order row by row
+		idx = np.indices(self.energies.shape)[0]
+		energies_new = self.energies[idx, ll_order]
+		llindex_new = self.llindex[idx, ll_order]
+		bindex_new = None if self.bindex is None else self.bindex[idx, ll_order]
+		# amplitudes and occupancy are per-transition, not per-state: unchanged
+		if in_place:
+			self.energies = energies_new
+			self.llindex = llindex_new
+			self.bindex = bindex_new
+			return self
+		else:
+			return TransitionsData(energies_new, self.amplitudes, llindex_new, bindex = bindex_new, occupancy = self.occupancy, bval = self.bval, refr_index = self.refr_index)
+
+	def sort(self, in_place = False, remove_duplicates = True, accuracy_digits = 6, llsort = True):
+		"""Sort transitions by energy
+
+		Arguments:
+		in_place            If True, reorder present instance and return self.
+		                    If False, return a new TransitionsData instance.
+		remove_duplicates   True (default) or False. If True, remove duplicate
+		                    transitions, i.e., those for which LL indices and
+		                    energies are equal.
+		accuracy_digits     Number of digits of precision in energy comparison
+		                    for testing duplicates. If this value is x, the
+		                    accuracy is 10^-x.
+		llsort              Enable sorting with respect to LL indices as last
+		                    step. Not recommended in full LL mode.
+
+		Note:
+		The default sorting algorithm used by numpy's argsort is the 'quicksort'
+		algorithm. Here, we use 'mergesort' for the second and subsequent
+		sorting steps; 'mergesort' is slower and requires more resources, but is
+		stable unlike 'quicksort'. See the documentation of numpy.sort for more
+		information.
+		"""
+		# sort by energies within transition
+		order = np.argsort(self.energies, axis = 1)
+		energies_new = np.take_along_axis(self.energies, order, axis = 1)
+		llindex_new = np.take_along_axis(self.llindex, order, axis = 1)
+		bindex_new = None if self.bindex is None else np.take_along_axis(self.bindex, order, axis = 1)
+		# sort by energy 2
+		order = np.argsort(np.around(energies_new[:, 1], decimals = accuracy_digits))
+		energies_new = energies_new[order, :]
+		llindex_new = llindex_new[order, :]
+		amplitudes_new = self.amplitudes[order]
+		bindex_new = None if bindex_new is None else bindex_new[order, :]
+		occupancy_new = None if self.occupancy is None else self.occupancy[order]
+		# sort by energy 1 (stable mergesort preserves the energy-2 order)
+		order = np.argsort(np.around(energies_new[:, 0], decimals = accuracy_digits), kind = 'mergesort')
+		energies_new = energies_new[order, :]
+		llindex_new = llindex_new[order, :]
+		amplitudes_new = amplitudes_new[order]
+		bindex_new = None if bindex_new is None else bindex_new[order, :]
+		occupancy_new = None if occupancy_new is None else occupancy_new[order]
+		# sort by LL index (stable, preserves energy order within each LL)
+		if llsort:
+			order = np.argsort(llindex_new[:, 0], kind = 'mergesort')
+			energies_new = energies_new[order, :]
+			llindex_new = llindex_new[order, :]
+			amplitudes_new = amplitudes_new[order]
+			bindex_new = None if bindex_new is None else bindex_new[order, :]
+			occupancy_new = None if occupancy_new is None else occupancy_new[order]
+		# Remove duplicates (for which the energies and ll indices are equal)
+		eacc = 10.**(-accuracy_digits)  # energy accuracy
+		if remove_duplicates and self.n > 0:
+			# Compare each row to its predecessor; sorting above guarantees
+			# duplicates are adjacent. Keep the first of each duplicate run.
+			sel = ~np.concatenate(([False], np.all((np.abs(energies_new[1:, :] - energies_new[:-1, :]) < eacc) & (llindex_new[1:, :] == llindex_new[:-1, :]), axis = 1)))
+			energies_new = energies_new[sel, :]
+			llindex_new = llindex_new[sel, :]
+			amplitudes_new = amplitudes_new[sel]
+			bindex_new = None if bindex_new is None else bindex_new[sel, :]
+			occupancy_new = None if occupancy_new is None else occupancy_new[sel]
+		if in_place:
+			self.energies = energies_new
+			self.llindex = llindex_new
+			self.amplitudes = amplitudes_new
+			self.bindex = bindex_new
+			self.occupancy = occupancy_new
+			self.n = self.energies.shape[0]
+			return self
+		else:
+			return TransitionsData(energies_new, amplitudes_new, llindex_new, bindex = bindex_new, occupancy = occupancy_new, bval = self.bval, refr_index = self.refr_index)
+
+	def at_energy(self, e, broadening = None, ampmin = None, index=None):
+		"""Get transitions that "cross" the energy e, i.e., which have one state above e and one state below e.
+
+		Arguments:
+		e           Fermi energy
+		broadening  BroadeningFunction or None. This defines the occupation
+		            function f_occ(energy[i] - e). If set to None, use a step
+		            function.
+		ampmin      None or a number. Ignore transitions whose amplitude is
+		            below this number. If None, use the configuration value
+		            'transitions_min_amplitude'.
+		index       None or int. This argument is passed to the occupation
+		            function if broadening is set.
+
+		Returns:
+		A new TransitionsData instance.
+		"""
+		if ampmin is None:
+			ampmin = get_config_num("transitions_min_amplitude", minval = 0)
+		if np.isnan(e):
+			return TransitionsData(bval = self.bval, refr_index = self.refr_index)
+		elif broadening is None:  # No broadening given: effectively use step function
+			sel = (self.energies[:, 0] <= e) ^ (self.energies[:, 1] <= e)  # XOR
+			if not np.any(sel):
+				return TransitionsData(bval = self.bval, refr_index = self.refr_index)
+			energies_new = self.energies[sel, :]
+			llindex_new = self.llindex[sel, :]
+			amplitudes_new = self.amplitudes[sel]
+			bindex_new = None if self.bindex is None else self.bindex[sel, :]
+			return TransitionsData(energies_new, amplitudes_new, llindex_new, bindex = bindex_new, occupancy = 1.0, bval = self.bval, refr_index = self.refr_index)
+		else:  # Use occupancy function from broadening
+			occ = broadening.occupation(self.energies - e, index=index)
+			occ_factor = np.abs(occ[:, 0] - occ[:, 1])  # |f_init - f_fin|
+			if ampmin is not None:
+				sel = ((self.amplitudes * occ_factor) >= ampmin)  # filter on transition rate * occupancy
+			else:
+				sel = np.ones(self.n, dtype = bool)
+			if not np.any(sel):
+				return TransitionsData()
+			energies_new = self.energies[sel, :]
+			llindex_new = self.llindex[sel, :]
+			amplitudes_new = self.amplitudes[sel]
+			bindex_new = None if self.bindex is None else self.bindex[sel, :]
+			return TransitionsData(energies_new, amplitudes_new, llindex_new, bindex = bindex_new, occupancy = occ_factor[sel], bval = self.bval, refr_index = self.refr_index)
+
+	def delta_e(self, absolute = True):
+		"""Energy differences
+
+		Argument:
+		absolute   If True, return absolute value. If False, return signed
+		           difference.
+		"""
+		ediff = self.energies[:, 1] - self.energies[:, 0]
+		return np.abs(ediff) if absolute else ediff
+
+	def freq_ghz(self):
+		"""Frequency in GHz"""
+		ediff = np.abs(self.energies[:, 1] - self.energies[:, 0])
+		return ediff / (2. * np.pi * hbar)
+
+	def lambda_nm(self):
+		"""Wave length in nm"""
+		ediff = np.abs(self.energies[:, 1] - self.energies[:, 0])
+		return cLight * (2. * np.pi * hbar) / ediff
+
+	def amp_density(self, signed = False):
+		"""Transition matrix (v^2) amplitude density, taking into account state degeneracy and occupancy.
+		Units are nm^2 ns^-2 nm^-2 = ns^-2, since the LL area degeneracy is already factored in."""
+		global _trans_bval_warning
+		# LL degeneracy per unit area is eB / (2 pi hbar); without a stored
+		# field value the factor defaults to 1 and a warning is printed once.
+		if self.bval is None:
+			if not _trans_bval_warning:
+				sys.stderr.write("Warning (TransitionsData.amp_density): Magnetic field value (self.bval) undefined. Ignore the degeneracy factor.\n")
+				_trans_bval_warning = True
+			degeneracy = 1
+		elif isinstance(self.bval, Vector):
+			degeneracy = (eoverhbar / 2.0 / np.pi) * self.bval.z()
+		elif isinstance(self.bval, (float, int, np.floating, np.integer)):
+			degeneracy = (eoverhbar / 2.0 / np.pi) * self.bval
+		elif isinstance(self.bval, np.ndarray):
+			# Per-transition field values; Vector entries contribute their z component
+			degeneracy = (eoverhbar / 2.0 / np.pi) * np.array([b.z() if isinstance(b, Vector) else b for b in self.bval])
+		else:
+			raise TypeError("Invalid type for internal variable self.bval")
+		amp_degen = degeneracy * self.amplitudes
+		if signed:
+			# Sign distinguishes 'up' (+) from 'down' (-) transitions
+			amp_degen *= np.sign(self.energies[:, 1] - self.energies[:, 0]) * np.sign(self.llindex[:, 1] - self.llindex[:, 0])
+		return amp_degen if self.occupancy is None else amp_degen * self.occupancy
+
+	def rate_density(self, signed = False):
+		"""Returns a transition rate density per electric field intensity.
+		Unit ns^-1 mV^-2. Energy delta function from FGR holds remaining units.
+		After multiplication with an electric field, the unit would be ns^-1 nm^-2.
+		"""
+		delta_omega = self.delta_e() / hbar
+		delta_omega_inv = np.reciprocal(delta_omega, out=np.zeros_like(delta_omega), where=(delta_omega != 0))
+		return self.amp_density(signed) * np.pi / hbar / 4 * delta_omega_inv ** 2
+
+	def dielectric_function(self, photon_energies, layer_thickness, component = 'xx', gamma = 1):
+		"""Calculate (2D) dielectric tensor components.
+
+		Argument:
+		photon_energies		Where to evaluate the dielectric function. Scalar or array of angular frequencies.
+		layer_thickness		Needed to convert from sheet density to volume density of states.
+		component	'xx', 'yy': Diagonal component (default)
+		 			'xy', 'yx': Off-diagonal component.
+		gamma		Broadening energy in meV (from states lifetime).
+
+		Returns:
+		Dielectic function at photon_energies.
+		"""
+		global _refr_index_warning
+		diel_fun = np.zeros_like(photon_energies, dtype = complex)
+		# Convert inputs to angular frequency scale. Avoid zero photon energy, as 1/omega diverges.
+		omega = np.where(photon_energies != 0, photon_energies, 1e-6) / hbar
+		gamma = gamma / hbar
+		if self.refr_index is None:
+			if not _refr_index_warning:
+				sys.stderr.write("Warning (TransitionsData.absorption): Refractive index not given, assume 1 by default.\n")
+				_refr_index_warning = True
+			refractive_index = 1
+		else:
+			refractive_index = self.refr_index
+		if component in ['xx', 'yy']:
+			if self.n > 0:
+				for amp, de in zip(self.amp_density(), (self.energies[:, 1] - self.energies[:, 0]) / hbar):
+					if de > 0:  # Negative energies are included implicitly (see Wiki: physics/Optical-transitions).
+						diel_fun -= amp * de * ((omega ** 2 - de ** 2 - gamma ** 2) - 1.0j * (omega * gamma)) / ((omega ** 2 - de ** 2 - gamma ** 2) ** 2 + omega ** 2 * gamma ** 2)  # / de
+				# Note: Do not use eoverhbar here, wrong unit. e is implicit, as we use meV as energy scale
+				diel_fun *= eovereps0 / (2 * hbar * omega**2 * layer_thickness)  # omega
+			diel_fun += refractive_index ** 2  # Background contribution from high energy transitions
+		elif component in ['xy']:
+			if self.n > 0:
+				for amp, de in zip(self.amp_density(signed = True), (self.energies[:, 1] - self.energies[:, 0]) / hbar):
+					if de > 0:  # Negative energies are included implicitly (see Wiki: physics/Optical-transitions).
+						diel_fun -= amp * (gamma * de ** 2 + gamma ** 3 + 1.0j * (omega ** 3 - omega * de ** 2)) / ((omega ** 2 - de ** 2 - gamma ** 2) ** 2 + omega ** 2 * gamma ** 2)  # / de
+				# Note: Do not use eoverhbar here, wrong unit. e is implicit, as we use meV as energy scale
+				diel_fun *= eovereps0 / (2 * hbar * omega**2 * layer_thickness)  # omega
+			# diel_fun += 1.0j * eovereps0 / omega * 1.0/137.0 / (2 * np.pi) * 300  # add a hall conductance (experimental), wrong unit? /hbar
+		elif component in ['yx']:
+			return -1 * self.dielectric_function(photon_energies, layer_thickness, 'xy', gamma)
+		else:
+			raise NotImplementedError("Dielectric functions only implemented for xx/xy components.")
+		return diel_fun
+
+	def absorption(self, signed=False):
+		"""This is the (dimensionless) 2D absorption coefficient alpha
+		   = Gamma(omega) n_dens / Phi_0(omega)
+		   = (1 / epsilon0) * (2 / c n_refr) * (hbar omega) * n_dens * gamma,
+		where n_dens * gamma is rate_dens
+		Units: mV nm e^-1 * ns nm^-1 * meV * nm^-2 * nm^2 mV^-2 ns^-1,
+		yields 1.
+		Note:
+		- eovereps0 is used as proxy for the value 1 / epsilon0. We keep the
+		  numerical value of eovereps0, but use the unit mV nm e^-1.
+		- As the local photon density (not flux!)
+		  Phi_0 = 1/2 * epsilon0 * n_ref^2 * |E|^2 / (hbar omega)
+		  also depends on electric field intensity, the 'missing' factor
+		  in self.rate_density cancels out.
+		Intensity after transmission is calculated as I = exp(-alpha) I_0.
+		"""
+		global _refr_index_warning
+		# Warning flag is module-global, shared with dielectric_function, so
+		# the missing-refractive-index warning is shown at most once per run.
+		if self.refr_index is None:
+			if not _refr_index_warning:
+				sys.stderr.write("Warning (TransitionsData.absorption): Refractive index not given, assume 1 by default.\n")
+				_refr_index_warning = True
+			refractive_index = 1
+		else:
+			refractive_index = self.refr_index
+		alpha = eovereps0 * 2.0 / (cLight * refractive_index) * self.delta_e() * self.rate_density(signed)
+		return alpha
+
+	def get_values(self, qty):
+		"""Get physical quantity 'qty'"""
+		if qty in ['deltae', 'delta_e']:
+			val = self.delta_e()
+		elif qty in ['freq', 'freqghz', 'freq_ghz']:
+			val = self.freq_ghz()
+		elif qty in ['freqthz', 'freq_thz']:
+			val = self.freq_ghz() * 1e-3
+		elif qty in ['lambda', 'wavelength', 'lambdanm', 'lambda_nm']:
+			val = self.lambda_nm()
+		elif qty in ['lambdaum', 'lambda_um', 'lambda\xb5m', 'lambda_\xb5m']:
+			val = self.lambda_nm() * 1e-3
+		elif qty == 'occupancy':
+			val = self.occupancy
+		elif qty == 'amplitude':
+			val = self.amplitudes
+		elif qty in ['rate', 'ratedensity', 'rate_density']:
+			val = self.rate_density()
+		elif qty == 'absorption':
+			val = self.absorption()
+		elif qty == 'sign':
+			val = np.sign(self.energies[:, 1] - self.energies[:, 0]) * np.sign(self.llindex[:, 1] - self.llindex[:, 0])
+		else:
+			raise ValueError("Invalid value for argument 'qty'")
+		return float("nan") * np.ones(self.n) if val is None else val
+
+	def print_all(self, ampmin = 0.05, llmax = None, more = True):
+		"""Print properties of all transitions
+
+		Arguments:
+		ampmin   Lower bound for amplitudes. Transitions with lower amplitudes
+		         are not printed. If None, take value from configuration.
+		llmax    Upper bound for LL index. If set, do not print transitions that
+		         contain a higher LL index. If None, do not restrict by LL
+		         index.
+		more     True or False. If True, print more properties. If False, print
+		         fewer properties.
+		"""
+		if ampmin is None:
+			ampmin = get_config_num("transitions_min_amplitude", minval = 0)
+		for ee, ll, aa in zip(self.energies, self.llindex, self.amplitudes):
+			if llmax is not None and (ll[0] > llmax or ll[1] > llmax):
+				continue
+			if aa > ampmin:
+				if more:
+					# Per-transition FGR rate and frequency (cf. rate_density)
+					rate = (2. * np.pi / hbar) * aa / 2 / np.abs(ee[1] - ee[0])**2
+					freq_ghz = np.abs(ee[1] - ee[0]) / (2. * np.pi * hbar)
+					print("%2i %8.3f and %2i %8.3f  |  deltaE =%8.3f meV  freq =%6.2f THz  lambda =%6i nm  |  amp = %5.3f  |  rate = %6.3f" % (ll[0], ee[0], ll[1], ee[1], ee[1] - ee[0], freq_ghz * 1e-3, round(cLight / freq_ghz), aa, rate))
+				else:
+					print("%2i %8.3f and %2i %8.3f  |  deltaE =%8.3f  |  amp = %5.3f" % (ll[0], ee[0], ll[1], ee[1], ee[1] - ee[0], aa))
+
+	def absorption_spectrum(self, energies, which = 'sum', broadening_type = 'lorentzian', broadening_scale = 2.5):
+		"""Calculate absorption spectrum.
+		This returns a two-dimensional array of the absorption as function of
+		magnetic field and energy (B, E).
+
+		Arguments:
+		energies          Array of energy values on the vertical axis
+		which             One of '+' (up transitions), '-' (down transitions),
+		                  'sum' or 'both' (up + down), 'delta' (up - down)
+		broadening_type   Type of broadening function, e.g., 'step', 'gauss',
+		                  'fermi', or 'lorentz'
+		broadening_scale  Scale parameter of the broadening function; typically,
+		                  the width (in energy).
+
+		Returns:
+		A 2-dim array.
+		"""
+		if which not in ['+', '-', 'sum', 'both', 'diff', 'delta']:
+			raise ValueError("Invalid value for argument 'which'")
+		if broadening_type not in ['step', 'delta', 'gauss', 'gaussian', 'normal', 'fermi', 'logistic', 'sech', 'thermal', 'lorentz', 'lorentzian']:
+			raise ValueError("Invalid value for argument 'broadening_type'")
+		if not isinstance(energies, np.ndarray):
+			raise TypeError("Argument 'energies' must be a numpy array")
+
+		signs = np.sign(self.llindex[:, 1] - self.llindex[:, 0]) * np.sign(self.energies[:, 1] - self.energies[:, 0])  # 'up'/+ or 'down'/- transitions
+		absorption = self.absorption()
+		deltae = np.abs(self.energies[:, 1] - self.energies[:, 0])
+		if which == 'delta':  # TODO: diff
+			return self.absorption_spectrum(energies, '+', broadening_type, broadening_scale) - self.absorption_spectrum(energies, '-', broadening_type, broadening_scale)
+		if which == 'sum' or which == 'both':
+			sel = np.ones_like(deltae, dtype = bool)
+		elif which == '+':
+			sel = (signs == 1)
+		elif which == '-':
+			sel = (signs == -1)
+		de, erange = np.meshgrid(deltae[sel], energies)
+		brf = BroadeningFunction(broadening_type, broadening_scale)
+		all_occ = brf.occupation(de, erange)
+		spec = np.gradient(np.sum(all_occ * absorption[np.newaxis, sel], axis = 1)) / np.gradient(energies)
+		return spec
+
+
+### GET TRANSITIONS ###
def get_transitions(eidata, magn, h_sym, which = None, ampmin = None, deltaemin = None, norb = 8, nll = None):
	"""Calculate all transition matrix elements |O+-|^2.
	Note that an implicit multiplication with delta(hbar omega_fi - hbar omega)
	(delta distribution from Fermi's Golden Rule) is performed, resulting in a
	unit of nm^2 ns^-2 meV^-1.
	Version for symbolic LL mode (axial approximation; the LL index is a good
	quantum number).

	Arguments:
	eidata      DiagDataPoint instance with eigenvector data (eidata.eivec is
	            not None) and LL indices (eidata.llindex is not None)
	magn        Number or Vector. The magnetic field.
	h_sym       Symbolic Hamiltonian used to derive the transition operators.
	which       None or 2-tuple. If set, restrict the set of states: a tuple
	            with floats selects by energy (eigenvalue), a tuple with
	            integers by band index. One member of the tuple may be None.
	            Transitions which lie outside this range will be ignored.
	ampmin      None or number. Lower bound for the amplitude (result of Fermi's
	            golden rule). Transitions whose amplitude is lower are
	            discarded. If None, use the value from configuration.
	deltaemin   None or number. Lower bound of the energy difference.
	            Transitions with lower energy difference are discarded. If None,
	            use the value from configuration.
	norb        Either 6 or 8. Number of orbitals.
	nll         Ignored. Compatibility with get_transitions_full().

	Returns:
	A TransitionsData instance.
	"""
	# Handle band selection: floats select by eigenvalue, integers by band index
	if isinstance(which, tuple) and len(which) == 2:
		if isinstance(which[0], (float, np.floating)) or isinstance(which[1], (float, np.floating)):
			eidata1 = eidata.select_eival(which)
		elif isinstance(which[0], (int, np.integer)) or isinstance(which[1], (int, np.integer)):
			eidata1 = eidata.select_bindex(which)
		else:
			raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	elif which is None:
		eidata1 = eidata
	else:
		raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")

	# Fall back to configured thresholds when not given explicitly
	if ampmin is None:
		ampmin = get_config_num("transitions_min_amplitude", minval = 0)
	if deltaemin is None:
		deltaemin = get_config_num("transitions_min_deltae", minval = 0)

	eidata2 = eidata1  # 'Rename' for consistency

	if eidata1.eivec is None:
		sys.stderr.write("ERROR (get_transitions): Missing eigenvectors.\n")
		exit(1)
	if eidata1.llindex is None:
		sys.stderr.write("ERROR (get_transitions): Missing LL indices.\n")
		exit(1)

	# Initialize
	ll_min, ll_max = min(eidata2.llindex), max(eidata2.llindex)
	# Transition matrices:
	# op_plus is linked to pol_p circular light polarisation per definition and the increment of one LL (axial approx.).
	# For op_minus it is exactly the other way round.
	# As op_± = op_x ± i op_y and op_x|y := dH/dk_x|y it follows that op_± := 2*dH/dk_∓
	op_plus  = 2/hbar * h_sym.deriv('-')
	op_minus = 2/hbar * h_sym.deriv('+')
	# op_x = 1/hbar * h_sym.deriv('x')
	# op_y = 1/hbar * h_sym.deriv('y')
	delta_n_vec = delta_n_ll(norb, magn)
	# Accumulators for the surviving transitions; concatenated at the end
	all_amp = []
	all_eival1 = []
	all_eival2 = []
	all_ll1 = []
	all_ll2 = []
	# Iterate over LL index n; only the pair (n, n+1) is considered per iteration
	for n in range(ll_min, ll_max + 1):
		bands1 = (eidata1.llindex == n)
		eival1 = eidata1.eival[bands1]
		for dn in [1]:  # no need to calculate both -1 and 1 (see get_transitions_full)
			if ll_min <= n + dn <= ll_max:
				bands2 = (eidata2.llindex == n + dn)
				eival2 = eidata2.eival[bands2]
				e2, e1 = np.meshgrid(eival1, eival2)  # inverted order is intended
				delta_e = e2 - e1

				# Eigenvectors of the two LL subsets; states as columns
				eivec1T = eidata1.eivec[:, bands1]
				eivec2T = eidata2.eivec[:, bands2]
				eivec2T_H = eivec2T.conjugate().transpose()

				## 'Transition matrix' [see, e.g., Luo and Furdyna, Phys. Rev. B (1990)]
				op = op_plus.ll_evaluate((n+dn, n), magn, delta_n_vec, all_dof = True) if dn == 1 \
					else op_minus.ll_evaluate((n+dn, n), magn, delta_n_vec, all_dof = True)
				opeivec1T = op @ eivec1T

				# For debugging:
				# xop = op_x.ll_evaluate((n + dn, n), magn, delta_n_vec, all_dof=True)
				# xopeivec1T = xop @ eivec1T
				# yop = op_y.ll_evaluate((n + dn, n), magn, delta_n_vec, all_dof=True)
				# yopeivec1T = yop @ eivec1T
				# xov = eivec2T_H @ xopeivec1T
				# xov2 = np.real(np.abs(xov)**2)
				# yov = eivec2T_H @ yopeivec1T
				# yov2 = np.real(np.abs(yov) ** 2)

				# n1 = eivec1T.shape[1]
				# n2 = eivec2T.shape[1]
				# eivec1norm2 = np.array([(eivec1T[:,j].conjugate().transpose() @ eivec1T[:,j])[0,0] for j in range(0, n1)])
				# eivec2norm2 = np.array([(eivec2T[:,j].conjugate().transpose() @ eivec2T[:,j])[0,0] for j in range(0, n2)])
				# print (n, n+dn)
				# print ("n_1:", eivec1norm2)
				# print ("n_2:", eivec2norm2)

				# Matrix elements <e2|op|e1> and their absolute squares (amplitudes)
				ov = eivec2T_H @ opeivec1T
				ov2 = np.real(np.abs(ov)**2)

				# Discard transitions below the amplitude or energy-difference thresholds
				sel = (ov2 >= ampmin) & (np.abs(delta_e) >= deltaemin)
				if np.count_nonzero(sel) > 0:
					all_eival1.append(e1[sel])
					all_eival2.append(e2[sel])
					all_amp.append(ov2[sel])
					all_ll1.append(np.full(np.count_nonzero(sel), n + dn))
					all_ll2.append(np.full(np.count_nonzero(sel), n))

	# No transitions survived the filters: return an empty TransitionsData
	if len(all_eival1) == 0:
		return TransitionsData((np.array([]), np.array([])), np.array([]), (np.array([]), np.array([])), bval = magn)

	return TransitionsData((np.concatenate(all_eival1), np.concatenate(all_eival2)), np.concatenate(all_amp), (np.concatenate(all_ll1), np.concatenate(all_ll2)), bval = magn)
+
def get_transitions_full(eidata, magn, h_sym, which = None, ampmin = None, deltaemin = None, norb = 8, nll = None):
	"""Calculate all transition matrix elements |O+-|^2.
	Note that an implicit multiplication with delta(hbar omega_fi - hbar omega)
	(delta distribution from Fermi's Golden Rule) is performed, resulting in a
	unit of nm^2 ns^-2 meV^-1.
	Version for full LL mode (the LL index is not a good quantum number).

	Arguments:
	eidata      DiagDataPoint instance with eigenvector data (eidata.eivec is
	            not None)
	magn        Number or Vector. The magnetic field.
	h_sym       Symbolic Hamiltonian used to derive the transition operators.
	which       None or 2-tuple. If set, restrict the set of states: a tuple
	            with floats selects by energy (eigenvalue), a tuple with
	            integers by band index. One member of the tuple may be None.
	            Transitions which lie outside this range will be ignored.
	ampmin      None or number. Lower bound for the amplitude (result of Fermi's
	            golden rule). Transitions whose amplitude is lower are
	            discarded. If None, use the value from configuration.
	deltaemin   None or number. Lower bound of the energy difference.
	            Transitions with lower energy difference are discarded. If None,
	            use the value from configuration.
	nll         Integer. The number of LLs in the model. This value is necessary
	            for determining the matrix size. Must be given explicitly.
	norb        Either 6 or 8. Number of orbitals.

	Returns:
	A TransitionsData instance. The values for LL index are set to either (0, 1)
	or (1, 0), depending on whether the transition raises or lowers the LL
	index. These values are not the actual values for the LL index, which may
	not be a conserved quantum number in the full LL mode.
	"""
	# Handle band selection: floats select by eigenvalue, integers by band index
	if isinstance(which, tuple) and len(which) == 2:
		if isinstance(which[0], (float, np.floating)) or isinstance(which[1], (float, np.floating)):
			eidata1 = eidata.select_eival(which)
		elif isinstance(which[0], (int, np.integer)) or isinstance(which[1], (int, np.integer)):
			eidata1 = eidata.select_bindex(which)
		else:
			raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")
	elif which is None:
		eidata1 = eidata
	else:
		raise TypeError("Argument which can be a 2-tuple of integers or floats, one of which might be replaced by None.")

	# Fall back to configured thresholds when not given explicitly
	if ampmin is None:
		ampmin = get_config_num("transitions_min_amplitude", minval = 0)
	if deltaemin is None:
		deltaemin = get_config_num("transitions_min_deltae", minval = 0)

	eidata2 = eidata1  # 'Rename' for consistency

	if eidata1.eivec is None:
		sys.stderr.write("ERROR (get_transitions): Missing eigenvectors.\n")
		exit(1)
	if nll is None:
		raise ValueError("Argument nll must be specified explicitly")

	# Initialize; sanity check that the eigenvector length is a multiple of nll * norb
	if eidata1.eivec.shape[0] % (nll * norb) != 0:
		raise ValueError("Value of nll incompatible with matrix size")

	# Transition matrices:
	# op_plus is linked to pol_p circular light polarisation per definition and the increment of one LL (axial approx.).
	# For op_minus it is exactly the other way round.
	# As op_± = op_x ± i op_y and op_x|y := dH/dk_x|y it follows that op_± := 2*dH/dk_∓
	op_plus = 2 / hbar * h_sym.deriv('-')
	op_minus = 2 / hbar * h_sym.deriv('+')

	# Accumulators for the surviving transitions; concatenated at the end
	all_amp = []
	all_eival1 = []
	all_eival2 = []
	all_ll1 = []
	all_ll2 = []
	e2, e1 = np.meshgrid(eidata1.eival, eidata2.eival)  # inverted order is intended
	delta_e = e2 - e1

	# Eigenvectors; states as columns
	eivec1T = eidata1.eivec
	eivec2T = eidata2.eivec
	eivec2T_H = eivec2T.conjugate().transpose()

	# For debugging:
	# n1 = eivec1T.shape[1]
	# n2 = eivec2T.shape[1]
	# eivec1norm2 = np.array([(eivec1T[:,j].conjugate().transpose() @ eivec1T[:,j])[0,0] for j in range(0, n1)])
	# eivec2norm2 = np.array([(eivec2T[:,j].conjugate().transpose() @ eivec2T[:,j])[0,0] for j in range(0, n2)])
	# print("n_1:", eivec1norm2)
	# print("n_2:", eivec2norm2)
	for dn in [1]:  # Note that transition matrices also contain contributions of dn = ∓3 (see wiki - Optical Transitions)
		## 'Transition matrix' [see, e.g., Luo and Furdyna, Phys. Rev. B (1990)]
		op_sym = op_plus if dn == 1 else op_minus
		# Note that op_± are not hermitian but op_±^\dag = op_∓. Care must be taken during construction, as most
		# constructors in kdotpy build hermitian matrices (hamiltonians) by default.
		op = hz_sparse_ll_full(op_sym, nll-3, magn, norb, all_dof = True, is_hermitian = False)
		opeivec1T = op @ eivec1T

		# This dense matrix multiplication consumes most (= almost all) of the
		# calculation time. The matrices are dense by nature; turning them into
		# sparse matrices is not possible. The only way to save calculation time
		# is to reduce the number of eigenstates under consideration.
		# The matrix sizes are (neig1, dim) * (dim, neig2) -> (neig1, neig2)
		ov = eivec2T_H @ opeivec1T
		ov2 = np.real(np.abs(ov)**2)

		# Note on attribution of transitions: We actually calculate both
		# <e1 |op_+| e2> and <e2 |op_+| e1> = (<e1 |op_+^dag| e2>)*
		# = (<e1 |op_-| e2>)*. However, we only make use of absolute
		# square of the element. The way we attribute differences in
		# LLs and energies to this amplitude can be used to reduce the
		# for dn in [-1,1] loop to a single execution. We must adjust the
		# sign of dn according to the sign of delta_e to yield correct
		# polarization sign of the transition.
		# Eival (energies) are attributed in a way such that for following
		# functions (such as transition filtering) the energy difference
		# is positive.
		# The approach could theoretically be changed to be more in line
		# with the axial LL mode, but this would require different
		# construction methods for the op matrix (upper/lower block
		# triangular) or cumbersome filtering of the eigenvectors that
		# are applied to the op matrix. No significant performance boost
		# is expected from other methods.
		# Further note that the LL number n is not a good quantum number
		# in full LL mode. Hence, we use ll1 indices only to keep track
		# of the polarity of the transition in a way compatible with the
		# axial approximation
		sel = (ov2 >= ampmin) & (np.abs(delta_e) >= deltaemin)
		if np.count_nonzero(sel) > 0:
			all_eival1.append(np.minimum(e1[sel], e2[sel]))
			all_eival2.append(np.maximum(e1[sel], e2[sel]))
			all_amp.append(ov2[sel])
			all_ll1.append(dn * np.sign(delta_e[sel]))  # TODO
			all_ll2.append(np.full(np.count_nonzero(sel), 0))       # TODO
	# No transitions survived the filters: return an empty TransitionsData
	if len(all_eival1) == 0:
		return TransitionsData((np.array([]), np.array([])), np.array([]), (np.array([]), np.array([])), bval = magn)

	return TransitionsData((np.concatenate(all_eival1), np.concatenate(all_eival2)), np.concatenate(all_amp), (np.concatenate(all_ll1), np.concatenate(all_ll2)), bval = magn)
+
def get_transitions_labels(data, canonical_order = True):
	"""Get all transitions labels (LL1, B1, LL2, B2) from DiagData.

	Arguments:
	data   DiagData instance with DiagDataPoint members that contain
	       TransitionsData (ddp.transitions is not None).
	canonical_order  False or True. Whether to put transition into canonical
	                 order (LL index 1 < LL index 2).

	Returns:
	List of tuples (LL index 1, band index 1, LL index 2, band index 2),
	free of duplicates. Empty list if no point contains usable transitions.
	"""
	all_labels = []
	for d in data:
		td = d.transitions
		if td is None or td.n == 0:
			continue
		if td.bindex is None:
			sys.stderr.write("Warning (get_transitions_labels): Band indices are required, but not present.\n")  # TODO: Handle this case by examining energies
			continue
		td1 = td.canonical_order() if canonical_order else td
		# One row per transition: (LL1, B1, LL2, B2)
		labels = np.vstack([td1.llindex[:, 0], td1.bindex[:, 0], td1.llindex[:, 1], td1.bindex[:, 1]]).transpose()
		all_labels.append(labels)
	if len(all_labels) == 0:
		# Nothing collected; np.vstack([]) would raise, so return early
		return []
	all_labels = np.vstack(all_labels)
	try:
		all_labels = np.unique(all_labels, axis = 0)  # requires numpy version >= 1.13.0
	except TypeError:
		# Fallback for numpy < 1.13.0 (no 'axis' argument): stable
		# lexicographic sort over all four columns, then drop duplicate rows.
		for j in [3, 2, 1, 0]:
			order = np.argsort(all_labels[:, j], kind = 'mergesort')
			all_labels = all_labels[order]
		sel = np.concatenate([[True], np.any(np.diff(all_labels, axis = 0) != 0, axis = 1)])
		all_labels = all_labels[sel]
	return [tuple(lb) for lb in all_labels]
+
class TransitionByLabel(TransitionsData):
	"""Container for a 'horizontal' data structure that contains all data for a single transition.

	Inherits from TransitionsData class. In contrast, here the row index of the
	arrays iterates over the points in the input data (DiagData instance), for
	example magnetic-field values.

	Attributes:
	n           Integer. Number of points in 'horizontal' direction. This is the
	            same as the length of the input data (DiagData instance).
	energies, amplitudes, llindex, occupancy, bval, refr_index: See parent class
	            (TransitionsData). Note that the row index of the arrays is
	            interpreted differently.
	where       Array of type boolean. True at all points where the transition
	            is defined.

	Arguments (__init__):
	data   DiagData instance with DiagDataPoint members that contain
	       TransitionsData (ddp.transitions is not None).
	lb     4-tuple. Transition label of the form (LL1, B1, LL2, B2).
	"""
	def __init__(self, data, lb):
		if not (isinstance(lb, tuple) and len(lb) == 4):
			raise TypeError("Transitions label (argument lb) must be a tuple of length 4.")
		self.n = len(data)
		self.where = np.zeros(self.n, dtype = bool)
		self.bval = np.array([d.paramval for d in data])
		# Initialize with NaN (= undefined); filled in below at points where
		# the labelled transition exists.
		self.energies = float("nan") * np.ones((self.n, 2), dtype = float)
		self.amplitudes = float("nan") * np.ones(self.n, dtype = float)
		self.occupancy = float("nan") * np.ones(self.n, dtype = float)
		self.llindex = np.zeros((self.n, 2), dtype = int)
		self.bindex = np.zeros((self.n, 2), dtype = int)
		self.refr_index = None
		refr_index = []

		for j, d in enumerate(data):
			td = d.transitions
			if td is None or td.n == 0:
				continue
			if td.bindex is None:
				sys.stderr.write("Warning (get_transition_by_label): Band indices are required, but not present.\n")  # TODO: Handle this case by examining energies
				continue

			# Match the label in either orientation, (LL1, B1) -> (LL2, B2) or reversed
			sel1 = ((td.llindex[:, 0] == lb[0]) & (td.bindex[:, 0] == lb[1]) & (td.llindex[:, 1] == lb[2]) & (td.bindex[:, 1] == lb[3]))
			sel2 = ((td.llindex[:, 0] == lb[2]) & (td.bindex[:, 0] == lb[3]) & (td.llindex[:, 1] == lb[0]) & (td.bindex[:, 1] == lb[1]))
			sel = sel1 | sel2
			if np.count_nonzero(sel) > 1:
				raise ValueError("Transition index is not unique.\n")
			elif np.count_nonzero(sel) == 1:
				self.llindex[j] = td.llindex[sel][0]
				self.bindex[j] = td.bindex[sel][0]
				self.energies[j] = td.energies[sel][0]
				self.amplitudes[j] = td.amplitudes[sel][0]
				if td.occupancy is not None:
					self.occupancy[j] = td.occupancy[sel][0]
				self.where[j] = True
				if td.refr_index is not None:
					refr_index.append(td.refr_index)
		# Collapse occupancy to None if it was never set
		if np.all(np.isnan(self.occupancy)):
			self.occupancy = None
		# The refractive index must be identical across all contributing points
		if len(refr_index) == 0:
			self.refr_index = None
		elif all([ri == refr_index[0] for ri in refr_index]):
			self.refr_index = refr_index[0]
		else:
			sys.stderr.write("Warning (TransitionByLabel.__init__): Refractive index is ambiguous.\n")
			self.refr_index = None

	def find_maximum(self, qty):
		"""Get an array of values for quantity 'qty' and determine its maximum.

		Argument:
		qty   String. The quantity for which to return the maximum.

		Returns:
		maxval   Maximal value of quantity qty, ignoring NaN entries.
		maxxval  Momentum or magnetic-field value at the maximum
		deltae   Signed energy difference at the maximum
		"""
		if np.count_nonzero(self.where) == 0:
			return None, None, None
		val = self.get_values(qty)
		val = val[self.where]
		if np.all(np.isnan(val)):
			return float("nan"), None, None
		xval = self.bval[self.where]
		enval = self.energies[self.where]
		# Bug fix: np.argsort() sorts NaN values to the end, so the previous
		# np.argsort(val)[-1] returned a NaN whenever any NaN was present.
		# np.nanargmax ignores NaN; the all-NaN case is excluded above.
		max_idx = np.nanargmax(val)
		return val[max_idx], xval[max_idx], enval[max_idx, 1] - enval[max_idx, 0]

	def absorption_spectrum(self, *args, **kwds):
		"""Override absorption_spectrum() from parent class (TransitionsData); always raises."""
		raise NotImplementedError("Function 'absorption_spectrum' not available for TransitionByLabel class")
diff --git a/kdotpy-v1.0.0/src/kdotpy/types.py b/kdotpy-v1.0.0/src/kdotpy/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c1ff226e0fbc33f2eed23236d21d48c3ba03d68
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/types.py
@@ -0,0 +1,410 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from abc import ABC, abstractmethod
+from typing import Any, Optional, Union
+import numpy as np
+
class DiagDataPoint(ABC):
	"""ABC for diagdata.DiagDataPoint

	This class declares the DiagDataPoint interface for typing purposes only:
	attribute types are given as annotations (no values are assigned) and all
	methods are abstract stubs. The concrete implementation lives in the
	module diagdata.
	"""

	# Attribute type declarations; see diagdata.DiagDataPoint for semantics
	k: Any  # float, Vector
	paramval: Any
	eival: Optional[np.ndarray]
	eivec: Optional[np.ndarray]
	neig: int
	dim: Optional[int]
	obsvals: Optional[np.ndarray]
	_obsids: Optional[list[str]]
	bindex: Optional[np.ndarray]
	llindex: Optional[np.ndarray]
	aligned_with_e0: bool
	char: Union[np.ndarray, list, None]
	transitions: Any  # TransitionsData, None
	wffigure: Any  # int, str, matplotlib figure object
	current_step: Optional[int]
	ham: Any  # np.ndarray, scipy sparse matrix object
	grid_index: int
	tuple_index: Optional[dict]
	opts: dict

	@property
	@abstractmethod
	def obsids(self):
		"""Abstract property; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def __str__(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def hash_id(self, length=6, precision='%.12e'):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def file_id(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def stitch_with(self, k, eival, eivec, targetenergy_old, targetenergy_new, inplace=False, accuracy=0.01):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def update(self, new_ddp):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def extend_by(self, k, eival, eivec, paramval = None, obsvals = None, obsids = None, char = None, llindex = None, bindex = None, accuracy = 1e-6):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def extend(self, *args, **kwds):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_observables(self, obsvals, obsids = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def calculate_observables(self, params, obs, obs_prop = None, overlap_eivec = None, magn = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def add_observable(self, obsvals = None, obsid = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def delete_eivec(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def build_tuple_index_cache(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	# Some 'get' functions
	@abstractmethod
	def get_index(self, val):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_index_with_llindex(self, val, llindex):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_ubindex(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_eival(self, val):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_eival0(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_char(self, val):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_all_char(self):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_observable(self, obs, val = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_observable_value(self, obs, bandval, obsval):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def subset(self, sel):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def subset_inplace(self, sel):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def select_llindex(self, ll):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def select_bindex(self, b):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def select_obs(self, obs, val, accuracy = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def select_eival(self, val):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def select_char(self, which, inplace = False):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def sort_by_eival(self, inplace = False, reverse = False):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def sort_by_obs(self, obs, inplace = False):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_eivec_phase(self, accuracy = 1e-6, inplace = False):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def get_eivec_coeff(self, norbitals, accuracy = 1e-6, ll_full = False, ny = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_char(self, chardata, eival = None, llindex = None, eival_accuracy = 1e-6):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_bindex(self, bindexdata, eival = None, llindex = None, aligned_with_e0 = False):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_llindex(self, llindex):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def set_eivec(self, eivec, val = None):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass

	@abstractmethod
	def to_binary_file(self, filename):
		"""Abstract method; see diagdata.DiagDataPoint."""
		pass
+
+
+### DIAGDATA ###
class DiagData(ABC):
	"""ABC for diagdata.DiagData

	This class declares the DiagData interface for typing purposes only:
	attribute types are given as annotations (no values are assigned) and all
	methods are abstract stubs. The concrete implementation lives in the
	module diagdata.
	"""

	# Attribute type declarations; see diagdata.DiagData for semantics
	data: list[DiagDataPoint]
	shape: tuple
	strides: tuple
	grid: Any  # TODO: VectorGrid
	gridvar: Optional[str]
	bindex_cache: Optional[list]

	@abstractmethod
	def align_with_grid(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def sort_by_grid(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_momenta(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_momentum_grid(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_paramval(self, component = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_xval(self, index = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_degrees(self, default = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_zero_point(self, return_index = False, ignore_paramval = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_base_point(self, return_index = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_total_neig(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def select_llindex(self, llval):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def select_eival(self, val):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def set_char(self, chardata, eival = None, llindex = None, eival_accuracy = 1e-6):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_all_char(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_all_llindex(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@property
	@abstractmethod
	def aligned_with_e0(self):
		"""Abstract property; see diagdata.DiagData."""
		pass

	@abstractmethod
	def reset_bindex(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_all_bindex(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def check_bindex(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_eival_by_bindex(self, b = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_observable_by_bindex(self, obs = None, b = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def find(self, kval, paramval = None, return_index = False, strictmatch = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_data_labels(self, by_index = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_plot_coord(self, label, mode):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_observable(self, obs, label, mode):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def set_observable_values(self, obsid, obsval, label):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def shift_energy(self, delta):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def set_zero_energy(self, delta = 0.0):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def set_shape(self, shape = None):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def symmetry_test(self, tfm, observables = None, ignore_lower_dim = False, verbose = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def symmetrize(self, axis = None, copy_eivec = True):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def get_cnp(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	## Forward of 'list-like' functions
	@abstractmethod
	def __len__(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def index(self, x):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def __iter__(self):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def __getitem__(self, i):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def append(self, data, strictmatch = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def extend(self, data):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def __add__(self, other):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def __radd__(self, other):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def __iadd__(self, other):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def interpolate(self, subdiv = 1, obs = False):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def to_binary_file(self, filename):
		"""Abstract method; see diagdata.DiagData."""
		pass

	@abstractmethod
	def diagonalize(self, model, solver, opts_list = None):
		"""Abstract method; see diagdata.DiagData."""
		pass
diff --git a/kdotpy-v1.0.0/src/kdotpy/version.py b/kdotpy-v1.0.0/src/kdotpy/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..61648eb64723d99c90c2b1f0d6ca8bab6605e39e
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/version.py
@@ -0,0 +1,61 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+from importlib.metadata import version
+import os
+import subprocess
+from platform import system
+
def get_version():
	"""Get version from git or from package if this is not a git directory.

	Runs 'git describe' in the directory containing this file. If that fails
	(git not installed, not a git checkout, or git produced no output), fall
	back to the version of the installed kdotpy package.

	Returns:
	String with the version identifier.
	"""
	scriptdir = os.path.dirname(os.path.realpath(__file__))
	# On Windows, resolve git through the shell
	shell = (system() == "Windows")
	cmd = ['git', 'describe']
	try:
		proc = subprocess.run(cmd, shell=shell, cwd=scriptdir, capture_output=True, text=True)
	except (OSError, subprocess.SubprocessError):
		# git binary missing or not executable; narrowed from a bare except
		# so that KeyboardInterrupt/SystemExit are not swallowed.
		output = ""
	else:
		# First output line, stripped of quotes, spaces, and a leading 'v'
		# (git tags of the form v1.0.0)
		output = proc.stdout.split('\n')[0].strip('" ').lstrip('" v')

	if not output:
		return version("kdotpy")  # read version from installed package
	else:
		return output
diff --git a/kdotpy-v1.0.0/src/kdotpy/wf.py b/kdotpy-v1.0.0/src/kdotpy/wf.py
new file mode 100644
index 0000000000000000000000000000000000000000..575d47e27b3bbebce5bf8e149755e0aad1d6b216
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/wf.py
@@ -0,0 +1,271 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import sys
+import numpy as np
+
+from .config import get_config_int, get_config_bool
+from .observables import observables
+from .momentum import Vector, VectorGrid, locations_index
+from . import ploto
+from . import tableo
+
+
+def twodim_ddp(diagdatapoint, params, style = None, filename = "", erange = None, **kwds):
+	"""Plot wave function for a DiagDataPoint in a 2D or LL calculation.
+
+	Arguments:
+	diagdatapoint  DiagDataPoint instance. Contains the data. The instance must
+	               contain eigenvector data, i.e., diagdatapoint.eivec is not
+	               None.
+	params         PhysParams instance.
+	style          'all', 'separate', 'default', or 'together'. Style of the
+	               output; for the first three options, plot psi(z) for all wave
+	               functions in separate plots (which may be bundled as a single
+	               pdf). For 'together', plot a fixed number of |psi(z)|^2 in a
+	               single plot.
+	filename       String. Filename for the wave functions without extension.
+	erange         List or array of two numbers. Energy range for wave function
+	               plots. Do not include states with energy eigenvalue outside
+	               this range.
+	**kwds         Further keyword arguments are passed to the plot function.
+	               (Not to the table/csv function.)
+
+	"""
+	if not isinstance(filename, str):
+		raise TypeError("Argument filename must be a string instance.")
+	elif (filename.lower().endswith(".pdf") or filename.lower().endswith(".csv")) and len(filename) > 4:
+		fname = filename[:-4]
+	elif filename == "":
+		fname = "wfs"
+	else:
+		fname = filename
+
+	if style.lower() in ["all", "separate", "default"]:
+		fig = ploto.wavefunction_z(
+			params, diagdatapoint, filename = fname + ".pdf", eivalrange = erange,
+			**kwds)
+		tableo.wavefunction_z(
+			params, diagdatapoint, filename = fname + ".csv", eivalrange = erange)
+	elif style.lower() == "together":
+		n_states = get_config_int('plot_wf_together_num', minval = 1)
+		fig = ploto.abs_wavefunctions_z(
+			params, diagdatapoint, filename = fname + ".pdf", eivalrange = erange,
+			num = n_states, **kwds)
+		tableo.abs_wavefunctions_z(
+			params, diagdatapoint, filename = fname + ".csv", eivalrange = erange)
+	else:
+		raise ValueError("Invalid value for argument style")
+
+	if fig is not None:
+		diagdatapoint.wffigure = fig
+	sys.stderr.write("(wave function plot to %s.pdf)\n" % fname)
+	sys.stderr.write("(wave function data to %s.csv)\n" % fname)
+	return
+
+def twodim(
+	data, params, wfstyle = None, wflocations = None, filename = "",
+	erange = None, remember_eivec = True, dependence = 'b',
+	set_eivec_phase = False, ll_full = False):
+	"""
+	Iterate over all data points and plot wave functions (2D and LL modes)
+
+	Arguments:
+	data             DiagData instance.
+	params           PhysParams instance.
+	wfstyle          None or string. Determines the type of wave function plot.
+	wflocations      List, array, or VectorGrid instance. Contains the momenta
+	                 or magnetic field values where wave functions should be
+	                 saved (plot and table).
+	filename         String. Filename for the wave functions without extension.
+	erange           List or array of two numbers. Energy range for wave
+		             function plots.
+	remember_eivec   True or False. If True (default), keep the eigenvector data
+	                 in memory If False, delete it afterwards.
+	dependence       'k' or 'b'. Whether to match the argument wflocations to
+	                 momenta (k) or magnetic field (b).
+	set_eivec_phase  True or False. If True, fix complex phase of the wave to a
+	                 sensible value. If False (default), take wave functions as
+	                 given.
+	ll_full          True or False. Whether we are in the full LL mode. See
+	                 documentation for ploto.wavefunction_z() for more
+	                 information.
+
+	Returns:
+	status  Integer or None. On success, return the number of successful wave
+	        function plots. On error, return None.
+	"""
+	if wfstyle is None:
+		sys.stderr.write("ERROR (wf.twodim): Wave function style should not be None.\n")
+		return None
+	if wfstyle.lower() not in ["all", "separate", "default", "together"]:
+		sys.stderr.write("ERROR (wf.twodim): Invalid wave function plot style '%s'.\n" % wfstyle)
+	if not isinstance(wflocations, (list, np.ndarray, VectorGrid)):
+		sys.stderr.write("ERROR (wf.twodim): Invalid or missing value for wflocations.\n")
+		return None
+	if dependence not in ['k', 'b']:
+		raise ValueError("Argument dependence must be 'k' or 'b'")
+
+	n_success = 0
+	n_loc = len(wflocations)
+	sys.stderr.write("Saving wave function plots and data...\n")
+	for ddp in data:
+		if dependence == 'b':
+			if ddp.paramval is None:
+				sys.stderr.write("ERROR (wf.twodim): Missing values for magnetic field.\n")
+				return None
+			k_b_vector = ddp.paramval if isinstance(ddp.paramval, Vector) else Vector(ddp.paramval, astype = 'z')
+			k_b_numeric = k_b_vector.z()
+		elif dependence == 'k':
+			k_b_vector = ddp.k if isinstance(ddp.k, Vector) else Vector(ddp.k, astype = 'x')
+			k_b_numeric = k_b_vector.len()
+		else:
+			raise ValueError("Value for dependence must be either 'k' or 'b'.")
+		j = locations_index(wflocations, vec = k_b_vector, vec_numeric = k_b_numeric)
+		if j is not None:
+			wfloc = wflocations[j]
+			if ddp.eivec is None:
+				sys.stderr.write("ERROR (wf.twodim): At %s, wave functions are requested, but eigenvector data is missing.\n" % k_b_vector)
+				continue
+			if set_eivec_phase:
+				ddp = ddp.set_eivec_phase(inplace = False)
+			if ddp.bindex is None:
+				bandlabels = ["" for _ in ddp.eival]
+			elif ddp.llindex is None:
+				bandlabels = ["%i" % b for b in ddp.bindex]
+			else:
+				bandlabels = ["(%i, %i)" % lb for lb in zip(ddp.llindex, ddp.bindex)]
+			if ddp.char is not None:
+				bandlabels = [("[%s]" % c) if len(b) == 0 else ("%s [%s]" % (b, c)) for b, c in zip(bandlabels, ddp.char)]
+
+			display_k = {'B': k_b_vector} if dependence == 'b' else {'k': k_b_vector}
+			file_id = ("_" + ddp.file_id()) if get_config_bool('wf_locations_filename') else ('-%i' % (j+1))
+			twodim_ddp(
+				ddp, params = params, style = wfstyle,
+				filename = filename + file_id, erange = erange,
+				display_k = display_k, bandlabels = bandlabels, ll_full = ll_full,
+				phase_rotate = (not set_eivec_phase))
+			n_success += 1
+			if not remember_eivec:
+				ddp.delete_eivec()
+			sys.stderr.write("%i / %i\n" % (n_success, n_loc))
+
+	if n_success == 0 and n_loc > 0:
+		sys.stderr.write("Warning (wf.twodim): No wave function files written.\n")
+	elif n_success < n_loc:
+		sys.stderr.write("Warning (wf.twodim): Fewer wave function files written than requested.\n")
+	return n_success
+
+def onedim_ddp(diagdatapoint, params, style = None, filename = "", erange = None, overlap_eivec = None, **kwds):
+	"""Plot wave function for a DiagDataPoint in a 1D calculation.
+
+	Arguments:
+	diagdatapoint  DiagDataPoint instance. Contains the data. The instance must
+	               contain eigenvector data, i.e., diagdatapoint.eivec is not
+	               None.
+	params         PhysParams instance.
+	style          'z' or '1d'; 'y'; 'default' or 'zy'; 'byband' or 'color'.
+	               Style of the output. For 'z' or '1d', plot psi(z) for y = 0
+	               for all wave functions in separate plots (which may be
+	               bundled as a single. For 'y', plot |psi(y)|^2, integrated
+	               over z, separated by orbitals (and subbands if requested, see
+	               overlap_eivec). For 'zy', plot |psi(z,y)|^2, total over all
+	               orbitals. For 'byband' or 'color', plot |psi(z,y)|^2 with
+	               colouring depending on local orbital character.
+	filename       String. Filename for the wave functions without extension.
+	erange         List or array of two numbers. Energy range for wave function
+	               plots. Do not include states with energy eigenvalue outside
+	               this range.
+	overlap_eivec  A dict instance or None. The keys are the subband labels, the
+	               values are arrays representing the eigenvector. If style is
+	               'y', it will do the following: If given, plot the
+	               decomposition of the state into subbands in addition to the
+	               decomposition into orbitals. If set to None (default), do the
+	               latter only. For other styles, this argument is ignored.
+	**kwds         Further keyword arguments are passed to the plot function.
+	               (Not to the table/csv function.)
+	"""
+	if not isinstance(filename, str):
+		raise TypeError("Argument filename must be a string instance.")
+	elif (filename.lower().endswith(".pdf") or filename.lower().endswith(".csv")) and len(filename) > 4:
+		fname = filename[:-4]
+	elif filename == "":
+		fname = "wfs"
+	else:
+		fname = filename
+
+	display_k = {'k': diagdatapoint.k}
+
+	if style.lower() in ["z", "1d"]:
+		ploto.wavefunction_z(
+			params, diagdatapoint, filename = fname + '.pdf', eivalrange = erange,
+			display_k = display_k)
+	elif style.lower() in ["y"]:
+		magn_wf = None if diagdatapoint.paramval is None else diagdatapoint.paramval.z() if isinstance(diagdatapoint.paramval, Vector) else diagdatapoint.paramval
+		obsy = observables(diagdatapoint.eivec, params, ['y', 'y2'])
+		ploto.abs_wavefunctions_y(
+			params, diagdatapoint, filename = fname + '.pdf', eivalrange = erange,
+			overlap_eivec = None, obsy = obsy, display_k = display_k, magn = magn_wf)
+		tableo.abs_wavefunctions_y(
+			params, diagdatapoint, filename = fname + '.csv', eivalrange = erange,
+			overlap_eivec = overlap_eivec, precision = 10)
+		if overlap_eivec is not None:
+			fnamesub = "wfssub" if len(fname) <= 3 else "wfssub" + fname[3:]
+			ploto.abs_wavefunctions_y(
+				params, diagdatapoint, filename = fnamesub + ".pdf",
+				eivalrange = erange, overlap_eivec = overlap_eivec, obsy = obsy,
+				display_k = display_k, magn = magn_wf)
+	elif style.lower() in ["byband", "by_band", "color", "colour"]:
+		ploto.wavefunction_zy(
+			params, diagdatapoint, eivalrange = erange, display_k = display_k,
+			filename = fname + '.pdf', separate_bands = True)
+		tableo.wavefunction_zy(
+			params, diagdatapoint, eivalrange = erange,	separate_bands = True,
+			filename = fname + '.csv')
+	elif style.lower() in ["default", "zy", "yz"]:
+		ploto.wavefunction_zy(
+			params, diagdatapoint, eivalrange = erange, display_k = display_k,
+			filename = fname + '.pdf')
+		tableo.wavefunction_zy(
+			params, diagdatapoint, eivalrange = erange,	filename = fname + '.csv')
+	else:
+		sys.stderr.write("ERROR (wf.onedim_ddp): Invalid value '%s' for argument style.\n" % style)
+	return
+
diff --git a/kdotpy-v1.0.0/src/kdotpy/xmlio/__init__.py b/kdotpy-v1.0.0/src/kdotpy/xmlio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e411349ba73376761ced4f42368cd22e763b411
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/xmlio/__init__.py
@@ -0,0 +1,45 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+# in read.py:
+from .read import readfiles, find_in_tar
+
+# in write.py:
+from .write import writefile
diff --git a/kdotpy-v1.0.0/src/kdotpy/xmlio/read.py b/kdotpy-v1.0.0/src/kdotpy/xmlio/read.py
new file mode 100644
index 0000000000000000000000000000000000000000..528f4b54280d26fa51cef723f7ef0d5fa3f73495
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/xmlio/read.py
@@ -0,0 +1,710 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+import re
+import sys
+import os
+import io
+import tarfile
+import gzip
+
+import xml.dom.minidom as xm
+
+from ..config import set_config
+from ..materials import allMaterials, formula_to_compound
+from ..momentum import VectorGrid, vector_from_attr
+from ..diagonalization import DiagDataPoint, DiagData
+from ..physparams import PhysParams
+
+from .tools import getattribute, get_node_values, get_node_dict, isint
+
+
+def find_in_tar(tar_file, pattern, regex = False, replace_existing = False):
+	"""Find pattern in file names contained in tar file.
+
+	Arguments:
+	tar_file          String. The tar file in which to search.
+	pattern           String. The search pattern.
+	regex             True or False. If True, treat pattern as a regular
+	                  expression. If False, do an ordinary match, but accepting
+	                  the wildcards '*' and '?'.
+	replace_existing  True or False. Determines behaviour when a file name is
+	                  found in the tar archive and this file is also found in
+	                  'unpacked' form in the same directory. If True, the
+	                  archived file takes precedence. If False, the unpacked
+	                  one.
+
+	Returns:
+	List of 2-tuples (tar_file, filename), where tar_file is the file name of
+	the archive and filename is the name of the file inside the archive.
+	"""
+	if not regex:
+		pattern = pattern.replace(".", r"[.]").replace("*", ".*").replace("?", ".")
+	if not os.path.isfile(tar_file):
+		return None
+	try:
+		tf = tarfile.open(tar_file)
+	except:
+		return None
+
+	filelist = tf.getnames()
+	tf.close()
+	matches = []
+	for f in filelist:  # .split("\n"):
+		if re.match(pattern, f) is not None:
+			if replace_existing:
+				matches.append((tar_file, f))
+			else:
+				d = os.path.dirname(tar_file)
+				if not os.path.isfile(os.path.join(d, f)):
+					matches.append((tar_file, f))
+	return matches
+
+
+def get_vector_from_element(xmlelement, prefix = '', case_sensitive = True):
+	"""Parse XML element and get Vector instance.
+
+	Arguments:
+	xmlelement      An XML Element.
+	prefix          String. The 'prefix' of the Vector.
+	case_sensitive  True or False. Whether the attributes are matched case
+	                sensitively.
+
+	Returns:
+	Vector instance.
+	"""
+	attr = {}
+	for co in ['', 'x', 'y', 'z', 'theta', 'phi']:
+		kstr = getattribute(xmlelement, prefix + co, case_sensitive = case_sensitive)
+		if kstr != "":
+			attr[prefix + co] = kstr
+	auattr = getattribute(xmlelement, "angleunit")
+	degrees = (auattr != 'rad')  # degrees is default
+	return vector_from_attr(attr, prefix = prefix, deg = degrees)
+
+def get_vectorgrid_from_element(xmlelement):
+	"""Parse XML element and get VectorGrid instance."""
+	prefix = getattribute(xmlelement, 'q')
+	vtype = getattribute(xmlelement, 'vectortype')
+	auattr = getattribute(xmlelement, "angleunit")
+	degrees = (auattr != 'rad')  # degrees is default
+
+	# Add variables/constants and values
+	vgargs = []
+	xvars = xmlelement.getElementsByTagName("variable")
+	for xvar in xvars:
+		vgargs.append(getattribute(xvar, 'component'))
+		val_str = xvar.firstChild.nodeValue
+		vgargs.append(np.array([float(s) for s in val_str.split()]))
+	xconsts = xmlelement.getElementsByTagName("constant")
+	for xconst in xconsts:
+		vgargs.append(getattribute(xconst, 'component'))
+		val_str = xconst.firstChild.nodeValue
+		vgargs.append(float(val_str))
+
+	return VectorGrid(*vgargs, astype = vtype, prefix = prefix, deg = degrees)
+
+def xml_norbitals_heuristic(xinfo):
+	"""Use heuristic in order to get number of orbitals.
+
+	Motivation:
+	Unfortunately, the number of orbitals has not been stored in data files
+	prior to 2018-09-06. We use this 'heuristic' to get it from the command line
+	or from the version info.
+
+	Arguments:
+	xinfo    An XML Element. The contents of <info>.
+
+	Returns:
+	Integer. The number of orbitals.
+	"""
+	norbitals = None
+	# Using <cmdargs>
+	xcmdargs = xinfo.getElementsByTagName("cmdargs")
+	if len(xcmdargs) > 0 and xcmdargs[0].nodeType == xcmdargs[0].ELEMENT_NODE and len(xcmdargs[0].childNodes) >= 1:
+		cmdargs_txt = xcmdargs[0].childNodes[0].data
+		cmdargs_all = cmdargs_txt.split(" ")
+
+		for i in range(0, len(cmdargs_all)):
+			if cmdargs_all[i].lower() in ['eightband', '8band', '8o', '8orb', '8orbital']:
+				if norbitals is not None:
+					return None
+				else:
+					norbitals = 8
+			elif cmdargs_all[i].lower() in ['sixband', '6band', '6o', '6orb', '6orbital']:
+				if norbitals is not None:
+					return None
+				else:
+					norbitals = 6
+			elif cmdargs_all[i].lower() in ['orbitals', 'orb', 'norb', 'n_orb']:
+				if norbitals is not None:
+					return None
+				try:
+					norbitals = int(cmdargs_all[i+1])
+				except:
+					return None
+				if norbitals not in [6, 8]:
+					return None
+	if norbitals is not None:
+		return norbitals
+	# Using <version> (heuristic)
+	xversion = xinfo.getElementsByTagName("version")
+	if len(xversion) > 0 and xversion[0].nodeType == xversion[0].ELEMENT_NODE and len(xversion[0].childNodes) >= 1:
+		version_txt = xversion[0].childNodes[0].data
+		m = re.match(r"([0-9][0-9][0-9][0-9])-([01][0-9])-([0-3][0-9])T([012][0-9]):([0-6][0-9]):([0-6][0-9])?(\+[0-9][0-9][0-9][0-9])? [0-9a-fA-F]+", version_txt)
+		if m is not None:
+			ver_y = int(m.group(1))
+			ver_m = int(m.group(2))
+			ver_d = int(m.group(3))
+			# everything older than version 2018-01-30T11:15:53 af3c684
+			if ver_y < 2018 or (ver_y == 2018 and (ver_m < 1 or (ver_m == 1 and ver_d < 30))):
+				norbitals = 6
+			# else: number of orbitals should have been specified via command line
+			# if norbitals is not set at this point, something must have gone wrong
+	return norbitals
+
+def xml_kdim_heuristic(xinfo):
+	"""Use heuristic in order to get number of momentum dimensions.
+
+	Motivation:
+	The number of momentum dimensions has not been stored in data files prior
+	to 2019-02-19. We use this "heuristic" to get it from the <generator> tag.
+
+	Note:
+	The renaming to 'kdotpy' happened at a later date, hence we only check for
+	'hgmnte-*' here.
+
+	Arguments:
+	xinfo    An XML Element. The contents of <info>.
+
+	Returns:
+	Integer. The number of orbitals.
+	"""
+	# Using <generator>
+	xgen = xinfo.getElementsByTagName("generator")
+	if len(xgen) > 0 and xgen[0].nodeType == xgen[0].ELEMENT_NODE and len(xgen[0].childNodes) >= 1:
+		cmd = xgen[0].childNodes[0].data.lower()
+		if 'hgmnte' in cmd:
+			if '-1d' in cmd:  # includes -1d-b (obsolete)
+				return 1
+			if '-2d' in cmd:  # includes -2d-b and -2d-bhz (obsolete)
+				return 2
+			if '-e1shift' in cmd:  # (obsolete)
+				return 2
+			if '-bulk' in cmd:  # includes -bulk-ll (in use)
+				return 3
+			if '-ll' in cmd:  # includes -ll-full and -ll-sym (obsolete)
+				return 2
+	return None
+
+def xml_read_params(xelmnt, norbitals = None, kdim = None):
+	"""Parse XML element and get PhysParams instance.
+
+	Arguments:
+	xmlelmnt    An XML Element. The contents of <parameters>.
+	norbitals   Integer (6 or 8) or None. Number of orbitals. Output from the
+	            function xml_norbitals_heuristic().
+	kdim        Integer (1, 2, or 3) or None. Number of momentum dimensions.
+	            Output from the function xml_kdim_heuristic().
+
+	Returns:
+	PhysParams instance.
+	"""
+	params_dict = {}
+
+	# <general>: Number of orbitals
+	xext = xelmnt.getElementsByTagName("general")[0]
+	params_dict['norbitals'] = get_node_values(xext, "n_orbitals")
+	if params_dict['norbitals'] is None:
+		if norbitals is None:
+			sys.stderr.write("Warning (xml_read_params): The number of orbitals could not be read from the data file.\n")
+		else:
+			params_dict['norbitals'] = norbitals
+			if 'verbose' in sys.argv:
+				print("Number of orbitals determined from heuristic using command-line arguments and/or version info: %i orbitals.\n" % norbitals)
+
+	# <external>
+	xext = xelmnt.getElementsByTagName("external")[0]
+	# magnetic field: try vector, else number
+	try:
+		params_dict['magn'] = get_vector_from_element(xelmnt.getElementsByTagName("B")[0], prefix = 'b', case_sensitive = False)
+	except:
+		params_dict['magn'] = get_node_values(xext, "B", defaultvalue = 0.0)
+	params_dict['temperature'] = get_node_values(xext, "T")
+	# Temperature to substitute into material parameter; substitute 0 if undefined.
+	temp = params_dict.get('temperature', 0)
+
+	# <geometry>
+	xgeo = xelmnt.getElementsByTagName("geometry")[0]
+	params_dict['zres'] = get_node_values(xgeo, "z_resolution")
+	params_dict['yres'] = get_node_values(xgeo, "y_resolution")
+	params_dict['width'] = get_node_values(xgeo, "width")
+	params_dict['linterface'] = get_node_values(xgeo, "l_interface")
+	params_dict['a_lattice'] = get_node_values(xgeo, "a_lattice")
+	params_dict['yconfinement'] = get_node_values(xgeo, "y_conf", defaultvalue = 0.0)
+	params_dict['strain_direction'] = get_node_values(xgeo, "strain_axis")
+
+	params_dict['kdim'] = get_node_values(xgeo, "kdim")
+	if params_dict['kdim'] is None:
+		if kdim is None:
+			sys.stderr.write("Warning (xml_read_params): The number of momentum dimensions could not be read from the data file.\n")
+		else:
+			params_dict['kdim'] = kdim
+			if 'verbose' in sys.argv:
+				print("Number of momentum dimensions determined from heuristic using command-line arguments: %i dimensions.\n" % kdim)
+
+	xlayers = xelmnt.getElementsByTagName("layerstructure")
+	if len(xlayers) > 0:
+		xlayers = xlayers[0]
+		xsubst = xlayers.getElementsByTagName("substrate")
+		if len(xsubst) > 0:
+			substcpd = get_node_values(xsubst[0], "compound")
+			params_dict['substrate_material'] = None if substcpd is None else allMaterials.get_from_string(substcpd)
+		nlayer_attr = getattribute(xlayers, "nlayers")
+		nlayer = len(xlayers.getElementsByTagName("layer"))
+		if isint(nlayer_attr) and int(nlayer_attr) != nlayer:
+			raise ValueError
+			# TODO
+		l_layer = [None] * nlayer
+		m_layer = [None] * nlayer
+		zmin = [None] * nlayer
+		zmax = [None] * nlayer
+		layer_types = [""] * nlayer
+		for j in range(0, nlayer):
+			xlayer = xlayers.getElementsByTagName("layer")[j]
+			l_layer[j] = get_node_values(xlayer, "thickness")
+			zmin[j] = get_node_values(xlayer, "z_bottom")
+			zmax[j] = get_node_values(xlayer, "z_top")
+			layer_types[j] = getattribute(xlayer, "type")
+			xlayermat = xlayer.getElementsByTagName("material")
+			if len(xlayermat) > 0:
+				layercpd = get_node_values(xlayermat[0], "compound")
+				# Get material parameters from looking up the compound in allMaterials
+				mat_from_cpd = allMaterials.get_from_string(layercpd)
+				mat_from_cpd = mat_from_cpd.evaluate(T = temp)
+
+				# Get material parameters by reading the parameter values directly
+				exclude_param = ['epsilon_par', 'epsilon_strain']
+				rename_param = {'aFree': 'a'}
+				mat_param = get_node_dict(
+					xlayermat[0], exclude=exclude_param, rename=rename_param
+				)
+
+				elements, composition = formula_to_compound(layercpd)
+				mat_id = "".join(elements) + "-" + layer_types[j] + '-1'
+				mat_param['composition'] = composition
+				mat_from_data = allMaterials.parse_dict(mat_id, mat_param, unique=True)
+				if mat_from_cpd == mat_from_data:
+					sys.stderr.write(f"Info (xml_read_params): Material {mat_from_data.name} matches built-in material {mat_from_cpd.name} for layer {layer_types[j]}.\n")
+					mat = mat_from_cpd
+				else:
+					mat = mat_from_data
+				try:
+					mat = mat.evaluate(T=temp)
+				except Exception as ex:
+					raise ValueError(
+						f"Unable to evaluate material parameters for {mat.name}") from ex
+				if not mat.check_complete():
+					sys.stderr.write(
+						f"ERROR (cmdargs.material): Missing parameters for material {mat.name}.\n")
+				if not mat.check_numeric():
+					sys.stderr.write(
+						f"ERROR (cmdargs.material): Some parameters for material {mat.name} did not evaluate to a numerical value.\n")
+				m_layer[j] = mat
+		if None not in zmin:
+			order = np.argsort(zmin)
+		elif None not in zmax:
+			order = np.argsort(zmax)
+		else:
+			order = np.arange(0, nlayer)
+		# TODO: Consistency checks
+		params_dict['m_layers'] = [m_layer[o] for o in order]
+		params_dict['l_layers'] = [l_layer[o] for o in order]
+	else:  # No layer structure tag, fall back to geometry tag (older versions)
+		m_subst = None
+		l_well = get_node_values(xgeo, "l_well")
+		l_barr = get_node_values(xgeo, "l_barr")
+		if l_well is None:
+			l_well = get_node_values(xgeo, "l_HgMnTe")
+		if l_barr is None:
+			l_barr = get_node_values(xgeo, "l_HgCdTe")
+		if l_well is not None and l_barr is not None:
+			l_layer = [l_barr, l_well, l_barr]
+			xmat = xelmnt.getElementsByTagName("material")
+			m_layer = [None, None, None]
+			for x in xmat:
+				mattype = getattribute(x, "layer")
+				matcpd = getattribute(x, "compound")
+				matconc = get_node_values(x, "concentration", defaultvalue = 0)
+				if matcpd is not None and matconc is not None:
+					if mattype == "well" or (mattype == "" and matcpd == "HgMnTe"):  # latter option: very old versions
+						m_layer[1] = allMaterials.get_from_string(matcpd, matconc)
+					elif mattype == "barrier" or (mattype == "" and matcpd == "HgCdTe"):  # latter option: very old versions
+						m_layer[0] = allMaterials.get_from_string(matcpd, matconc)
+						m_layer[2] = m_layer[0]
+					elif mattype == "substrate":
+						m_subst = allMaterials.get_from_string(matcpd, matconc)
+					else:
+						raise ValueError("Invalid material/layer type %s" % mattype)
+			params_dict['m_layers'] = m_layer
+			params_dict['l_layers'] = l_layer
+			params_dict['substrate_material'] = m_subst
+
+	params_dict['hide_yconfinement_warning'] = True
+	params_dict['hide_strain_warning'] = True
+	return PhysParams(**params_dict)
+
+class FileTypeError(Exception):
+	"""Exception raised when a file name does not match a parseable type."""
+	pass
+
+class XMLFileWrapper:
+	"""Container class for XML file or tar'ed or gzipped XML files
+
+	Attributes:
+	xmlfile   String or File object. If a string, the file name.
+	tarfile   File object.
+	filetype  String. File type; one of 'xml', 'xmlgz', 'targz'.
+	"""
+	def __init__(self, fname, basedir = None):
+		self.xmlfile = None
+		self.tarfile = None
+		if isinstance(fname, str) and fname.endswith(".xml"):
+			self.filetype = 'xml'
+			self.xmlfile = os.path.join(basedir, fname) if basedir is not None and not os.path.isabs(fname) else fname
+		elif isinstance(fname, str) and fname.endswith(".xml.gz"):
+			self.filetype = 'xmlgz'
+			xgzfname = os.path.join(basedir, fname) if basedir is not None and not os.path.isabs(fname) else fname
+			self.xmlfile = gzip.open(xgzfname, 'r')
+		elif isinstance(fname, tuple) and len(fname) == 2 and fname[0].endswith(".tar.gz"):
+			self.filetype = 'targz'
+			tfname = os.path.join(basedir, fname[0]) if basedir is not None and not os.path.isabs(fname[0]) else fname[0]
+			try:
+				self.tarfile = tarfile.open(tfname)
+			except:
+				raise IOError
+			try:
+				self.xmlfile = self.tarfile.extractfile(fname[1])
+			except:
+				self.tarfile.close()
+				raise IOError
+		else:
+			raise FileTypeError("Non-parseable file type")
+
+	def parse(self):
+		"""Parse the XML content"""
+		if self.filetype == 'xml':
+			return xm.parse(self.xmlfile)
+		elif self.filetype == 'xmlgz':
+			return xm.parseString(self.xmlfile.read())
+		elif self.filetype == 'targz':
+			return xm.parse(self.xmlfile)
+		else:
+			raise FileTypeError("Non-parseable file type")
+
+	def close(self):
+		"""Close file objects"""
+		if isinstance(self.xmlfile, io.IOBase):  # check if it is a file object
+			self.xmlfile.close()
+		if isinstance(self.tarfile, io.IOBase):  # check if it is a file object
+			self.tarfile.close()
+
+	def __del__(self):
+		"""Close file objects"""
+		self.close()
+
+def readfiles(filenames, basedir = None):
+	"""Read and parse XML files.
+
+	Arguments:
+	filenames   List of strings. A list of filenames.
+	basedir     String or None. The directory relative to which the data files
+	            are to be sought. If None, the current directory.
+
+	Returns:
+	data        DiagData instance. Dispersion or magnetic-field dependence data.
+	params      PhysParams instance. The physical parameters.
+	dependence  If a dispersion (momentum), then 'k'. Otherwise, the list
+	            [depval, depstr, depunit], where depval is an array containing
+	            the parameter values, depstr is a string with the dependence
+	            parameter (typically 'b' for magnetic field), and depunit a
+	            string representing the unit (typically 'T' for tesla, in case
+	            of magnetic field).
+	"""
+	# Accumulators over all input files. vgrid caches a VectorGrid instance,
+	# or becomes an int counting incompatible grids once a mismatch is found.
+	data = DiagData([])
+	params = None
+	dependence = None
+	depunit = ""
+	mode = None
+	vgrid = None
+
+	for xfn in filenames:
+		this_vgrid = None
+		# Open and parse the file
+		error_str = ("%s from %s" % (xfn[1], xfn[0])) if isinstance(xfn, tuple) and len(xfn) == 2 else "%s" % xfn
+		try:
+			xmlfile = XMLFileWrapper(xfn, basedir = basedir)
+		except FileTypeError:
+			sys.stderr.write("Warning (Readfiles): File %s is not of a parseable type. It will not be read as a data file.\n" % error_str)
+			continue
+		except:  # IOError, etc.
+			# NOTE(review): built-in exit() relies on the site module;
+			# sys.exit() would be more robust. Left unchanged here.
+			sys.stderr.write("ERROR (Readfiles): Could not read file %s.\n" % error_str)
+			exit(1)
+
+		xf = xmlfile.parse()
+		xdatafile = xf.getElementsByTagName("datafile")[0]
+
+		# Parse the <info> section (for heuristic determination of norbitals)
+		xinfo = xdatafile.getElementsByTagName("info")
+		if len(xinfo) != 1:
+			sys.stderr.write("Warning (Readfiles): The data file %s should have exactly one <info> tag. All but the first <info> tag is ignored.\n" % error_str)
+		xinfo = xinfo[0]
+		norb_heur = xml_norbitals_heuristic(xinfo)
+		kdim_heur = xml_kdim_heuristic(xinfo)
+
+		# Parse configuration values (multiple instances possible)
+		xconfigs = xdatafile.getElementsByTagName("configuration")
+		for xconfig in xconfigs:
+			config_values = {}
+			for xconfval in xconfig.childNodes:
+				if xconfval.nodeType == xconfval.ELEMENT_NODE:
+					tag = xconfval.tagName
+					try:
+						val = xconfval.firstChild.data
+					except:
+						pass  # node without text content; skip
+					else:
+						config_values[tag] = val
+			for key in config_values:
+				if not set_config(key, config_values[key]):
+					sys.stderr.write("Warning (ReadFiles): Unknown configuration value '%s' in input file %s.\n" % (key, error_str))
+
+		# Parse parameters (SysParam instance)
+		xparams = xdatafile.getElementsByTagName("parameters")[0]
+		this_params = xml_read_params(xparams, norbitals = norb_heur, kdim = kdim_heur)
+		if params is None:
+			params = this_params
+		else:
+			# Compare parameters across files; warn (but continue) on conflict
+			params_diff = params.diff(this_params)
+			if not params.check_equal(params_diff):
+				sys.stderr.write("Warning (ReadFiles): Data files have conflicting parameter values\n")
+				print("Parameter differences:")
+				params.print_diff(params_diff, style = "align")
+
+		# dispersion
+		xdisps = xdatafile.getElementsByTagName("dispersion")
+		if len(xdisps) > 0:
+			xdisp = xdisps[0]
+			if not (mode is None or mode == "dispersion"):
+				sys.stderr.write("ERROR (ReadFiles): Cannot mix dispersion and dependence\n")
+				exit(1)
+			mode = "dispersion"
+
+			xvgs = xdisp.getElementsByTagName("vectorgrid")
+			if len(xvgs) > 0:
+				this_vgrid = get_vectorgrid_from_element(xvgs[0])
+				if this_vgrid.prefix is not None and this_vgrid.prefix != 'k':
+					sys.stderr.write("Warning (ReadFiles): Data file contains dispersion data, but variable is not 'k' (momentum).\n")
+
+			xks = xdisp.getElementsByTagName("momentum")
+			for xk in xks:
+				kval = get_vector_from_element(xk, 'k')
+				xeival = xk.getElementsByTagName("energies")
+
+				if len(xeival) > 0:
+					eival_str = xeival[0].firstChild.nodeValue
+					eivals = np.array([float(s) for s in eival_str.split()])
+				else:
+					# No energies: store an empty data point, skip other tags
+					eivals = np.array([])
+					data.append(DiagDataPoint(kval, np.array([]), None), strictmatch = True)
+					continue
+				ddp = DiagDataPoint(kval, eivals, None)
+
+				xchar = xk.getElementsByTagName("characters")
+				if len(xchar) > 0 and xchar[0].firstChild is not None:
+					char = xchar[0].firstChild.nodeValue.split()
+					# kval == 0 presumably holds at the zone center only;
+					# characters are accepted there exclusively
+					if kval == 0:
+						ddp.set_char(char)
+					else:
+						sys.stderr.write("Warning (ReadFiles): Band characters given at k != 0 are ignored.\n")
+
+				xbindex = xk.getElementsByTagName("bandindex")
+				if len(xbindex) > 0:
+					bindex = [int(x) for x in xbindex[0].firstChild.nodeValue.split()]
+					ddp.set_bindex(bindex)
+
+				xllindex = xk.getElementsByTagName("llindex")
+				if len(xllindex) > 0:
+					llindex = [int(x) for x in xllindex[0].firstChild.nodeValue.split()]
+					ddp.set_llindex(llindex)
+
+				xobs = xk.getElementsByTagName("observable")
+				if len(xobs) > 0:
+					obsvals = np.zeros((len(xobs), ddp.neig), dtype = complex)
+					obsids = []
+					for jo, xo in enumerate(xobs):
+						obsids.append(getattribute(xo, "q"))
+						obsval_str = xo.firstChild.nodeValue
+						obsvals[jo] = np.array([complex(s) for s in obsval_str.split()])
+					ddp.set_observables(obsvals, obsids)
+
+				# merge if momentum value is already there; if yes, add new energies only; if no, add data point at new momentum
+				data.append(ddp, strictmatch = True)
+
+			dependence = 'k'
+
+		# dependence on other variable
+		xdisps = xdatafile.getElementsByTagName("dependence")
+		if len(xdisps) > 0:
+			xdisp = xdisps[0]
+			if not (mode is None or mode == "dependence"):
+				sys.stderr.write("ERROR (ReadFiles): Cannot mix dispersion and dependence\n")
+				exit(1)
+			mode = "dependence"
+
+			xvgs = xdisp.getElementsByTagName("vectorgrid")
+			if len(xvgs) > 0:
+				this_vgrid = get_vectorgrid_from_element(xvgs[0])
+				if this_vgrid.prefix is not None and this_vgrid.prefix != 'b':
+					sys.stderr.write("Warning (ReadFiles): Data file contains dependence data, but vector grid contains the variable 'k' (momentum).\n")
+
+
+			depstr = getattribute(xdisp, "variable")
+			xks = xdisp.getElementsByTagName("variabledata")
+			for xk in xks:
+				kval = get_vector_from_element(xk, 'k')
+				pval = get_vector_from_element(xk, depstr)
+				xeival = xk.getElementsByTagName("energies")
+				depunit = getattribute(xdisp, "vunit")
+
+				if len(xeival) > 0:
+					eival_str = xeival[0].firstChild.nodeValue
+					eivals = np.array([float(s) for s in eival_str.split()])
+				else:
+					# No energies: store an empty data point, skip other tags
+					eivals = np.array([])
+					data.append(DiagDataPoint(kval, np.array([]), None), strictmatch = True)
+					continue
+				ddp = DiagDataPoint(kval, eivals, None, paramval = pval)
+
+				xchar = xk.getElementsByTagName("characters")
+				if len(xchar) > 0 and xchar[0].firstChild is not None:
+					char = xchar[0].firstChild.nodeValue.split()
+					if kval == 0:
+						ddp.set_char(char)
+					else:
+						sys.stderr.write("Warning (ReadFiles): Band characters given at k != 0 are ignored.\n")
+
+				xbindex = xk.getElementsByTagName("bandindex")
+				if len(xbindex) > 0:
+					bindex = [int(x) for x in xbindex[0].firstChild.nodeValue.split()]
+					ddp.set_bindex(bindex)
+
+				xllindex = xk.getElementsByTagName("llindex")
+				if len(xllindex) > 0:
+					llindex = [int(x) for x in xllindex[0].firstChild.nodeValue.split()]
+					ddp.set_llindex(llindex)
+
+				xobs = xk.getElementsByTagName("observable")
+				if len(xobs) > 0:
+					obsvals = np.zeros((len(xobs), ddp.neig), dtype = complex)
+					obsids = []
+					for jo, xo in enumerate(xobs):
+						obsids.append(getattribute(xo, "q"))
+						obsval_str = xo.firstChild.nodeValue
+						obsvals[jo] = np.array([complex(s) for s in obsval_str.split()])
+					ddp.set_observables(obsvals, obsids)
+
+				# merge if momentum+parameter value is already there; if yes, add new energies only; if no, add data point at new momentum+parameter value
+				data.append(ddp, strictmatch = True)
+
+			depval = data.get_paramval()
+			dependence = [depval, depstr, depunit]
+
+		# Check whether the file's VectorGrid is compatible (equal, subset, superset)
+		# with the existing (cached) VectorGrid. If not, start counting the number of
+		# incompatible VectorGrid instances that have been encountered (the existing
+		# instance also counts as one).
+		if isinstance(this_vgrid, VectorGrid):
+			if vgrid is None:
+				# No existing Vectorgrid yet
+				if "verbose" in sys.argv:
+					print("New VectorGrid")
+				vgrid = this_vgrid
+			elif isinstance(vgrid, VectorGrid) and this_vgrid.equal(vgrid):
+				# New equal existing: Keep existing VectorGrid
+				if "verbose" in sys.argv:
+					print("Equal VectorGrid")
+			elif isinstance(vgrid, VectorGrid) and this_vgrid.is_subset_of(vgrid):
+				# New is subset of existing: Keep existing VectorGrid
+				if "verbose" in sys.argv:
+					print("Subset VectorGrid")
+			elif isinstance(vgrid, VectorGrid) and vgrid.is_subset_of(this_vgrid):
+				# New is superset of existing: Change to new VectorGrid
+				if "verbose" in sys.argv:
+					print("Superset VectorGrid")
+				vgrid = this_vgrid
+			elif isinstance(vgrid, VectorGrid) and vgrid.is_compatible_with(this_vgrid):
+				# Compatible grids (that can be combined)
+				if "verbose" in sys.argv:
+					print("Combination VectorGrid")
+				vgrid = vgrid.extend(this_vgrid)
+			# Otherwise: Not compatible
+			elif isinstance(vgrid, int):
+				vgrid += 1
+			else:
+				vgrid = 2
+
+		del xmlfile  # triggers XMLFileWrapper.__del__, closing file objects
+
+	if isinstance(vgrid, int):
+		sys.stderr.write("Warning (ReadFiles): Multiple (#=%i) incompatible VectorGrid definitions\n" % vgrid)
+	elif vgrid is not None:
+		# Build a new data array with the appropriate VectorGrid member
+		try:
+			data = DiagData(data.data, grid = vgrid)
+		except:
+			sys.stderr.write("Warning (ReadFiles): Could not create data array with VectorGrid. The combination of data points may be unsuitable. Continuing with a data array without VectorGrid.\n")
+		else:
+			data.align_with_grid()
+	# else: pass
+
+	return data, params, dependence
diff --git a/kdotpy-v1.0.0/src/kdotpy/xmlio/tools.py b/kdotpy-v1.0.0/src/kdotpy/xmlio/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca87b65e09b469fe7c6b00a1f59906194a4159eb
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/xmlio/tools.py
@@ -0,0 +1,253 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+import sys
+import os
+from platform import system
+
+### SOME TOOLS ###
+def array_to_text(arr):
+	"""String representation of a numpy array.
+	Array elements are separated by spaces. For float type, values smaller than
+	1e-14 in absolute value are represented by 0. For complex type, the array is
+	represented as real (float type) if the imaginary parts are smaller than
+	1e-14 in absolute value.
+
+	Argument:
+	arr   A one-dimensional NumPy array.
+
+	Returns:
+	String
+	"""
+	arrtype = arr.dtype.kind  # single character code for the array data type
+	if arrtype in 'iuSU':  # signed int, unsigned int, str, unicode (deprecated)
+		return " ".join([str(x) for x in arr])
+	elif arrtype == 'f':  # float
+		arr0 = np.where(np.abs(arr) < 1e-14, np.zeros_like(arr), arr)
+		return " ".join([str(x) for x in arr0])
+	elif arrtype == 'c':  # complex
+		arr_re = np.where(np.abs(np.real(arr)) < 1e-14, np.zeros_like(arr, dtype = float), np.real(arr))
+		if np.abs(np.imag(arr)).max() < 1e-14:
+			return " ".join([str(x) for x in arr_re])
+		else:
+			arr_im = np.where(np.abs(np.imag(arr)) < 1e-14, np.zeros_like(arr, dtype = float), np.imag(arr))
+			return " ".join([str(x) for x in (arr_re + 1.j * arr_im)])
+	else:
+		return ""
+
+def matrix_to_text(mat):
+	"""String representation of a numpy matrix.
+	Entries (columns) are separated by spaces. Rows are separated by semicolon
+	';' and space. For float type, values smaller than 1e-14 in absolute value
+	are represented by 0. For complex type, the array is represented as real
+	(float type) if the imaginary parts are smaller than 1e-14 in absolute
+	value.
+
+	Argument:
+	mat    A numpy matrix. It may not be a 2-dimensional array.
+
+	Returns:
+	String
+	"""
+	if not isinstance(mat, np.ndarray) and mat.ndim == 2:
+		raise TypeError("Argument must be a matrix (numpy.ndarray instance with ndim = 2)")
+	mattype = mat.dtype.kind  # single character code for the array data type
+	if mattype in 'iuSU':  # signed int, unsigned int, str, unicode (deprecated)
+		return "; ".join([" ".join([str(x) for x in row]) for row in mat])
+	elif mattype == 'f':  # float
+		mat0 = np.where(np.abs(mat) < 1e-14, np.zeros_like(mat), mat)
+		return "; ".join([" ".join([str(x) for x in row]) for row in mat0])
+	elif mattype == 'c':  # complex
+		mat_re = np.where(np.abs(np.real(mat)) < 1e-14, np.zeros_like(mat, dtype = float), np.real(mat))
+		if np.abs(np.imag(mat)).max() < 1e-14:
+			return "; ".join([" ".join([str(x) for x in row]) for row in mat_re])
+		else:
+			mat_im = np.where(np.abs(np.imag(mat)) < 1e-14, np.zeros_like(mat, dtype = float), np.imag(mat))
+			return "; ".join([" ".join([str(x) for x in row]) for row in (mat_re + 1.j*mat_im)])
+	else:
+		return ""
+
+def isint(s):
+	try:
+		int(s)
+		return True
+	except:
+		pass
+	return False
+
+def isfloat(s):
+	try:
+		float(s)
+		return True
+	except:
+		pass
+	return False
+
+
+def which_cmd(cmd):
+	"""Resolve command using PATH environment variable
+
+	Argument:
+	cmd       String. Command to be looked for.
+
+	Returns:
+	full_cmd  String or None. If any "path/cmd" exists, return that value. If
+	          not found, return None.
+	"""
+	if "PATH" not in os.environ:
+		return None
+	path_sep = ';' if system() == 'Windows' else ':'
+	paths = os.environ["PATH"].split(path_sep)
+	for p in paths:
+		if os.path.exists(p):
+			full_cmd = os.path.join(p, cmd)
+			if os.path.exists(full_cmd):
+				return full_cmd
+	return None
+
+def resolve_kdotpy_cmd(script, kdotpy_cmd = 'kdotpy'):
+	"""Resolve kdotpy command using PATH environment variable
+	Replace /path/to/kdotpy-xx.py by 'kdotpy xx' if 'kdotpy' refers to the main
+	script.
+
+	Arguments:
+	script      String. Script path to be replaced.
+	kdotpy_cmd  String. The main script. One should not really use anything else
+	            than the default value 'kdotpy'.
+
+	Returns:
+	cmds_list   List of strings. If the replacement has succeeded, return
+	            ['kdotpy'], otherwise return [script] (a single-element list
+	            with the input argument script).
+	"""
+	which_kdotpy = which_cmd(kdotpy_cmd)
+	if which_kdotpy is None:
+		# 'kdotpy' not found on PATH; no replacement possible
+		return [script]
+	kdotpy_path = os.path.realpath(which_kdotpy)
+	if os.path.isfile(script):
+		scriptdir = os.path.dirname(script)
+		scriptfile = os.path.basename(script)
+	else:
+		sys.stderr.write("Warning (resolve_kdotpy_cmd_path): Script does not refer to an existing file or path.\n")
+		return [script]
+	# NOTE(review): os.path.samefile raises OSError if either directory does
+	# not exist; scriptdir may be '' for a bare file name — confirm callers
+	# always pass a resolvable path.
+	if os.path.samefile(scriptdir, os.path.dirname(kdotpy_path)) and scriptfile == 'kdotpy':
+		return [kdotpy_cmd]
+	# Invocation through the package entry point, e.g. .../kdotpy/__main__.py
+	if scriptdir.endswith("kdotpy") and scriptfile == "__main__.py":
+		return [kdotpy_cmd]
+	return [script]
+
+
+### 'SIMPLE' GET FUNCTIONS ###
+def getattribute(xmlelement, attrname, case_sensitive = True):
+	"""Get an XML attribute
+
+	Arguments:
+	xmlelement      An XML Element.
+	attrname        String. Attribute name to match.
+	case_sensitive  True or False. Whether the attribute name match is done case
+	                sensitively.
+
+	Returns:
+	String. If the XML element has a matching attribute, the attribute value.
+	Otherwise the empty string.
+	"""
+	n_attr = xmlelement.attributes.length
+	if n_attr == 0:
+		return ""
+	for i in range(0, n_attr):
+		if xmlelement.attributes.item(i).name == attrname or ((not case_sensitive) and xmlelement.attributes.item(i).name.lower() == attrname.lower()):
+			return xmlelement.attributes.item(i).value
+	return ""
+
+def get_node_values(xparent, which, defaultvalue = None):
+	"""Get node value scanning through child nodes of an XML element.
+	The function returns the value of the first node that matches (by tag name).
+
+	xparent       An XML Element. The parent node, in which to do the scan.
+	which         String. The tag name for which to scan.
+	defaultvalue  The return value if the tag name is not found.
+
+	Returns:
+	If there is a matching node, an integer, float, or string depending on the
+	actual contents. If there is no match, return defaultvalue (any type).
+	"""
+	for x in xparent.getElementsByTagName(which):
+		if x.nodeType == x.ELEMENT_NODE and len(x.childNodes) >= 1:
+			if isint(x.childNodes[0].nodeValue):
+				# print (which, "=", int(x.childNodes[0].nodeValue))
+				return int(x.childNodes[0].nodeValue)
+			elif isfloat(x.childNodes[0].nodeValue):
+				# print (which, "=", float(x.childNodes[0].nodeValue))
+				return float(x.childNodes[0].nodeValue)
+			else:
+				# print (which, "=", x.childNodes[0].data)
+				return x.childNodes[0].data
+	return defaultvalue
+
+def get_node_dict(xparent, exclude = [], rename = {}):
+	"""Get node value scanning through child nodes of an XML element.
+	Only take the first sublevel; the extraction of values is not recursive.
+
+	Note: Unlike get_node_values(), this function does not try to interpret the
+	data as integer or float values.
+
+	Arguments:
+	xparent       An XML Element. The parent node, from which
+	exclude       List of strings. The tag names to ignore.
+	rename        A dict instance, of the form {'tag': 'key', ...}. Here, the
+	              value of the XML tag 'tag' is saved with the dict key 'key'.
+
+	Returns:
+	data          A dict instance. The keys are the tag names, the values the
+	              node values.
+	"""
+	data = {}
+	for x in xparent.childNodes:
+		if x.nodeType == x.ELEMENT_NODE and len(x.childNodes) >= 1:
+			tag = x.tagName
+			val = x.childNodes[0].nodeValue
+			if tag in exclude:
+				continue
+			if tag in rename:
+				tag = rename[tag]
+			if isinstance(val, str) and len(val) > 0:
+				data[tag] = val
+	return data
diff --git a/kdotpy-v1.0.0/src/kdotpy/xmlio/write.py b/kdotpy-v1.0.0/src/kdotpy/xmlio/write.py
new file mode 100644
index 0000000000000000000000000000000000000000..34841dbd9f6b2c751f38e74b631b7e5d9905ea81
--- /dev/null
+++ b/kdotpy-v1.0.0/src/kdotpy/xmlio/write.py
@@ -0,0 +1,837 @@
+# kdotpy - k·p theory on a lattice for simulating semiconductor band structures
+# Copyright (C) 2024 The kdotpy collaboration <kdotpy@uni-wuerzburg.de>
+#
+# SPDX-License-Identifier: GPL-3.0-only
+#
+# This file is part of kdotpy.
+#
+# kdotpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, version 3.
+#
+# kdotpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# kdotpy. If not, see <https://www.gnu.org/licenses/>.
+#
+# Under Section 7 of GPL version 3 we require you to fulfill the following
+# additional terms:
+#
+#     - We require the preservation of the full copyright notice and the license
+#       in all original files.
+#
+#     - We prohibit misrepresentation of the origin of the original files. To
+#       obtain the original files, please visit the Git repository at
+#       <https://git.physik.uni-wuerzburg.de/kdotpy/kdotpy>
+#
+#     - As part of a scientific environment, we believe it is reasonable to
+#       expect that you follow the rules of good scientific practice when using
+#       kdotpy. In particular, we expect that you credit the original authors if
+#       you benefit from this program, by citing our work, following the
+#       citation instructions in the file CITATION.md bundled with kdotpy.
+#
+#     - If you make substantial changes to kdotpy, we strongly encourage that
+#       you contribute to the original project by joining our team. If you use
+#       or publish a modified version of this program, you are required to mark
+#       your material in a reasonable way as different from the original
+#       version.
+
+import numpy as np
+
+from time import strftime
+import os
+import socket
+import sys
+import shlex
+
+import xml.dom.minidom as xm
+
+from ..config import get_all_config, get_config_bool
+from ..materials import Material, material_parameters_units
+from ..layerstack import LayerStack
+from ..momentum import Vector
+from ..observables import all_observables
+from ..latticetrans import normvec_to_intvec, euler_angles_zxz
+from .. import __version__
+
+from .tools import array_to_text, matrix_to_text, resolve_kdotpy_cmd
+
+####### XML #######
+
+def addchild(xroot, xparent, tag, data = None):
+	"""Add a child node, common data types.
+
+	Arguments:
+	xroot    An XML Document. The XML root.
+	xparent  An XML Element. The parent node for the newly created node.
+	tag      String. Tag name for the new node.
+	data     None, string, list, tuple, numpy array, numpy matrix, or any
+	         object with a __str__() method. If None, create an empty node.
+
+	Returns:
+	An XML Element. The new node.
+	"""
+	x = xroot.createElement(tag)
+	xparent.appendChild(x)
+	if data is None:
+		return x
+	if isinstance(data, str):
+		text = data
+	elif isinstance(data, list) or isinstance(data, tuple):
+		text = " ".join([str(d) for d in data])
+		xtext = xroot.createTextNode(text)
+	elif isinstance(data, np.ndarray) and data.ndim == 2:
+		text = matrix_to_text(data)
+	elif isinstance(data, np.ndarray):
+		text = array_to_text(data)
+	else:
+		text = str(data)
+	xtext = xroot.createTextNode(text)
+	x.appendChild(xtext)
+	return x
+
+def addchild_material(xroot, xparent, tag, material, fmt = None):
+	"""Add a child node with material data.
+
+	Arguments:
+	xroot     An XML Document. The XML root.
+	xparent   An XML Element. The parent node for the newly created node.
+	tag       String. Tag name for the new node.
+	material  String or Material instance. If a string, add a node with the
+	          string as data. If a Material instance, create a node with the
+	          formatted material name as data or a standardized string
+	          representation plus a few child nodes with properties.
+	fmt       None or string. The format type for the material.format()
+	          function. Possible values: 'full', 'sub', 'tex', 'tuple', 'plain'.
+
+	Returns:
+	An XML Element. The new node.
+	"""
+	xm = None
+	if isinstance(material, str):
+		xm = addchild(xroot, xparent, tag, material)
+	elif isinstance(material, Material):
+		if fmt in ['full', 'sub', 'tex', 'tuple', 'plain']:
+			mat = material.format(fmt)
+			xm = addchild(xroot, xparent, tag, mat)
+		else:
+			xm = addchild(xroot, xparent, tag, material.name)
+			if len(material[0]) == 6:
+				try:
+					xm.setAttribute('dopant', material.name[2:4])
+					xm.setAttribute('doping_fraction', material.composition[1])
+				except:
+					sys.stderr.write("Warning (addchild_material): Cannot determine dopant and/or doping fraction.\n")
+	if xm is None:
+		sys.stderr.write("Warning (addchild_material): Not a properly formatted material. Output is omitted.\n")
+	return xm
+
+def addchild_layerstack(xroot, xparent, tag, layerstack, substrate_material = None):
+	"""Add a child node with layer stack information.
+
+	Arguments:
+	xroot               An XML Document. The XML root.
+	xparent             An XML Element. The parent node for the newly created
+	                    node.
+	tag                 String. Tag name for the new node.
+	layerstack          LayerStack instance.
+	substrate_material  None or Material instance. If set, add the substrate
+	                    material as <substrate> node into the present node.
+
+	Returns:
+	An XML Element. The new node.
+
+	Raises:
+	ValueError if layerstack is not a LayerStack instance.
+	TypeError if substrate_material is neither None nor a Material instance.
+	"""
+	if not isinstance(layerstack, LayerStack):
+		raise ValueError("Argument must be a LayerStack instance.")
+
+	xls = addchild(xroot, xparent, tag)
+	xls.setAttribute("nlayer", str(layerstack.nlayer))
+	xls.setAttribute("plus_substrate", "yes" if isinstance(substrate_material, Material) else "no")
+	if isinstance(substrate_material, Material):
+		# Substrate node: compound name, lattice constant (if known), and a
+		# formatted compound label
+		xl = addchild(xroot, xls, "substrate")
+		xmat = addchild(xroot, xl, "material")
+		xmat.setAttribute("compound", substrate_material.name)
+		# if isinstance(mat, Material):
+		# 	x = addchild(xroot, xparams1, "concentration", str(mat[1]))
+		# 	x.setAttribute("element", mat[0][2:4])
+		# 	x.setAttribute("type", "doping_fraction")
+		if 'a' in substrate_material.param:
+			x = addchild(xroot, xmat, "a_lattice", substrate_material['a'])
+			x.setAttribute("unit", "nm")
+		addchild_material(xroot, xmat, "compound", substrate_material, fmt ='tex')
+	elif substrate_material is not None:
+		raise TypeError("Substrate_material must be a Material instance.")
+
+	# One <layer> node per layer: z coordinates plus material parameters
+	for j, layerdata in enumerate(layerstack):
+		param_mat, z, name = layerdata
+		z_bottom, thickness, z_top = z
+		xl = addchild(xroot, xls, "layer")
+		if name is not None:
+			xl.setAttribute('type', name)
+		xlp = addchild(xroot, xl, "z_bottom", z_bottom)
+		xlp.setAttribute("unit", "nm")
+		xlp = addchild(xroot, xl, "thickness", thickness)
+		xlp.setAttribute("unit", "nm")
+		xlp = addchild(xroot, xl, "z_top", z_top)
+		xlp.setAttribute("unit", "nm")
+
+		xmat = addchild(xroot, xl, "material")
+		if 'material' in param_mat:
+			addchild_material(xroot, xmat, "compound", param_mat['material'], fmt ='tex')
+		strain_matrix = layerstack.get_strain_matrix(j)
+		for key in sorted(param_mat):
+			if key == 'material':
+				# 'material' was already written as <compound> above
+				# addchild_material(xroot, xparams1, "compound", param_layer['material'], fmt ='tex')
+				continue
+			if key == 'epsilonxx' and strain_matrix is not None:
+				# Write the full strain matrix once, keyed off 'epsilonxx'
+				xepsmat = addchild(xroot, xmat, 'epsilon_strain', strain_matrix)
+				xepsmat.setAttribute('basis', 'a,b,c')
+				continue
+			if key.startswith('epsilon') and not key.startswith('epsilon_'):
+				# Individual strain components are covered by the matrix
+				continue
+
+			x = addchild(xroot, xmat, key, param_mat[key])
+
+			if key in material_parameters_units:
+				u = material_parameters_units[key]
+				if u is not None:
+					x.setAttribute("unit", u)
+	return xls
+
+def addchild_bands_extrema(xroot, xparent, tag, bands_extrema):
+	"""Add a child node with band extrema information.
+
+	Arguments:
+	xroot           An XML Document. The XML root.
+	xparent         An XML Element. The parent node for the newly created node.
+	tag             String. Tag name for the new node.
+	bands_extrema   A dict instance, whose keys are the band labels and values
+	                are lists of BandsExtremum instances.
+
+	Returns:
+	An XML Element. The new node.
+
+	Raises:
+	ValueError if bands_extrema is not a dict instance.
+	"""
+	if not isinstance(bands_extrema, dict):
+		raise ValueError("Argument must be a dict instance.")
+
+	xbex = addchild(xroot, xparent, tag)
+	labels = sorted(bands_extrema.keys())
+	for lb in labels:
+		if len(bands_extrema[lb]) == 0:
+			continue
+		xbexb = addchild(xroot, xbex, "band")
+		# Band-wide attributes are taken from the first extremum in the list
+		if bands_extrema[lb][0].bindex is not None:
+			xbexb.setAttribute("index", str(bands_extrema[lb][0].bindex))
+		if bands_extrema[lb][0].llindex is not None:
+			xbexb.setAttribute("ll", str(bands_extrema[lb][0].llindex))
+		if bands_extrema[lb][0].char is not None:
+			xbexb.setAttribute("char", bands_extrema[lb][0].char)
+		# Output the extrema sorted by momentum magnitude |k|
+		order = np.argsort([ex.k.len() for ex in bands_extrema[lb]])
+		for j in order:
+			ex = bands_extrema[lb][j]
+			xex = addchild(xroot, xbexb, "extremum")
+			xex.setAttribute("type", ex.minmax)
+			xexk = addchild(xroot, xex, "momentum")
+			xml_setmomentumattribute(xexk, ex.k)
+			xml_setmomentumvalue(xroot, xexk, ex.k)
+			addchild(xroot, xex, "energy", str(ex.energy)).setAttribute("unit", "meV")
+			if ex.mass is not None:
+				addchild(xroot, xex, "mass", ex.mass).setAttribute("unit", "m_0")
+	return xbex
+
+
+def xmlheader(xroot, xparent, tag = None, caller = None, version_info = None):
+	"""Add a child node with the <info> header.
+	This contains the script name, git version info, current time, host name,
+	command line arguments, info about operating system, and info about Python
+	and the add-on modules.
+
+	Arguments:
+	xroot          An XML Document. The XML root.
+	xparent        An XML Element. The parent node for the newly created node.
+	tag            String or None. If set, tag name for the new node. If None,
+	               use the default tag name <info>.
+	caller         String or None. The script (filename of the executable) for
+	               which the XML file is written. If None, use the first
+	               argument of the command line.
+	version_info   String or None. If a set, include this as (git) version info.
+	               NOTE(review): this argument is currently unused in the body;
+	               the package __version__ is written instead — confirm intent.
+
+	Returns:
+	An XML Element. The new node.
+	"""
+	xinfo = xroot.createElement('info' if tag is None else tag)
+	xparent.appendChild(xinfo)
+	addchild(xroot, xinfo, 'generator', sys.argv[0] if caller is None else caller)
+	# ISO-like local timestamp with UTC offset
+	addchild(xroot, xinfo, 'currenttime', strftime("%Y-%m-%dT%H:%M:%S %z"))
+	addchild(xroot, xinfo, "version", __version__)
+
+	## Hostname
+	hostname = socket.gethostname()
+	fqdname = socket.getfqdn()
+	addchild(xroot, xinfo, 'hostname', hostname)  # tag formerly called 'clmachine'
+	if fqdname != hostname:
+		addchild(xroot, xinfo, 'hostname_full', fqdname)
+
+	## Command line arguments
+	# Optionally replace the full script path by the short 'kdotpy' command
+	sys_argv = resolve_kdotpy_cmd(sys.argv[0]) + sys.argv[1:] if get_config_bool('xml_shorten_command') else sys.argv
+	cmd_str = " ".join([shlex.quote(arg) for arg in sys_argv])
+	xcmd = addchild(xroot, xinfo, 'cmdargs', cmd_str)
+	xcmd.setAttribute("n_args", str(len(sys.argv)))
+
+	## OS information and Python (+module) information
+	xmlosinfo(xroot, xinfo, 'os')
+	xmlpyinfo(xroot, xinfo, 'python', modules = [
+		'numpy', 'scipy', 'matplotlib', 'pandas', 'h5py', 'cupy', 'pyMKL',
+		'scikits.umfpack'
+	])
+	return xinfo
+
def xmlmoduleversion(xroot, xparent, modulename, tag = 'module_version', attr = 'name'):
	"""Add a child node with version info of a Python module.

	Arguments:
	xroot       An XML Document. The XML root.
	xparent     An XML Element. The parent node for the newly created node.
	modulename  String. Module name for which to include the information, e.g.,
	            'numpy'.
	tag         String. Tag name of the new node.
	attr        String. Attribute name whose value will be the module name.

	Returns:
	If the module is loaded and exposes a version, an XML Element (the new
	node). Otherwise, None.
	"""
	module = sys.modules.get(modulename)
	if module is None:
		return None
	try:
		ver = module.__version__
	except Exception:
		# Best effort: if the module does not expose __version__ (or the
		# attribute lookup fails for any other reason), skip it silently.
		# 'except Exception' instead of a bare 'except' so that
		# KeyboardInterrupt and SystemExit are not swallowed.
		return None
	if isinstance(ver, bytes):
		ver = str(ver, 'utf-8')  # some modules store the version as bytes
	xmodv = addchild(xroot, xparent, tag, ver)
	xmodv.setAttribute(attr, modulename)
	return xmodv
+
def xmlpyinfo(xroot, xparent, tag = None, modules = None):
	"""Add a child node with information about Python and add-on modules.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	tag      String or None. Tag name for the new node. If None, use the default
	         <python>.
	modules  List of strings or None. The module names which to include if they
	         are loaded. If None, include information for all loaded modules.

	Returns:
	An XML Element. The new node.
	"""
	xpy = xroot.createElement(tag if tag is not None else 'python')
	xparent.appendChild(xpy)

	# Python interpreter version as major.minor.micro
	addchild(xroot, xpy, 'version', "%s.%s.%s" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]))

	# Select which modules to report: all loaded ones (sorted) or the given list
	if modules is None:
		module_names = sorted(sys.modules)
	elif isinstance(modules, list):
		module_names = modules
	else:
		module_names = []
	for name in module_names:
		xmlmoduleversion(xroot, xpy, name)
	return xpy
+
def xmlosinfo(xroot, xparent, tag = None):
	"""Add a child node with info about the operating system.
	The information one gets may depend on the nature of the operating system,
	for example, Windows vs Linux and between different Linux distributions.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	tag      String or None. Tag name for the new node. If None, use the
	         default tag name <os>.

	Returns:
	An XML Element. The new node.
	"""
	xosinfo = xroot.createElement('os' if tag is None else tag)
	xparent.appendChild(xosinfo)

	addchild(xroot, xosinfo, 'platform', sys.platform)
	try:
		uname_result = os.uname()
	except (AttributeError, OSError):
		# os.uname() does not exist on Windows (AttributeError) and may fail
		# with OSError in unusual environments; catch these explicitly instead
		# of a bare 'except', and skip the detailed fields in that case.
		pass
	else:
		addchild(xroot, xosinfo, 'sysname', uname_result.sysname)
		addchild(xroot, xosinfo, 'nodename', uname_result.nodename)
		addchild(xroot, xosinfo, 'release', uname_result.release)
		addchild(xroot, xosinfo, 'version', uname_result.version)
		addchild(xroot, xosinfo, 'machine', uname_result.machine)
	return xosinfo
+
def xmlconfig(xroot, xparent, tag = None):
	"""Add a child node with the configuration values.

	Note:
	If the value of the configuration option 'xml_omit_default_config_values' is
	True, then output only the configuration values that are not equal to their
	defaults. (If all values are default, then the configuration node will have
	no children.) If it is False, output all configuration values.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	tag      String or None. If set, tag name for the new node. If None, use the
	         default tag name <configuration>.

	Returns:
	An XML Element. The new node.
	"""
	omit_default = get_config_bool('xml_omit_default_config_values')
	xconfig = xroot.createElement('configuration' if tag is None else tag)
	xparent.appendChild(xconfig)
	xconfig.setAttribute('default_values_omitted', str(omit_default))
	all_config = get_all_config(omit_default = omit_default)
	# Sorted keys give a stable, diffable output order
	for key in sorted(all_config):
		addchild(xroot, xconfig, key, all_config[key])
	# Bug fix: previously this returned the function object itself
	# ('return xmlconfig'); return the newly created element instead.
	return xconfig
+
def xmlparams(xroot, xparent, params, tag = None):
	"""Add a child node with physical parameters.
	The information is organized into several child nodes: <general>,
	<external>, <geometry>, and <layerstructure>.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	params   PhysParams instance.
	tag      String or None. If set, tag name for the new node. If None, use the
	         default tag name <parameters>.

	Returns:
	An XML Element. The new node.
	"""
	xparams = xroot.createElement('parameters' if tag is None else tag)
	xparent.appendChild(xparams)

	## Common parameters (<general>)
	xparams1 = xroot.createElement('general')
	xparams.appendChild(xparams1)
	addchild(xroot, xparams1, "n_orbitals", params.norbitals)
	# NOTE: gMn and TK0 have become material parameters since v1.0.0

	## External parameters (<external>): magnetic field and temperature
	xparams1 = xroot.createElement('external')
	xparams.appendChild(xparams1)
	if isinstance(params.magn, Vector):
		# Vector-valued field: magnitude as node value, components as
		# attributes taken from Vector.xmlattr
		xmagn = addchild(xroot, xparams1, "B", params.magn.len())
		xmagn.setAttribute("unit", "T")
		attr = params.magn.xmlattr(prefix = 'B')
		for a in attr:
			xmagn.setAttribute(a, str(attr[a]))
	else:
		addchild(xroot, xparams1, "B", params.magn).setAttribute("unit", "T")
	addchild(xroot, xparams1, "T", params.temperature).setAttribute("unit", "K")

	## Geometry (<geometry>)
	xparams1 = xroot.createElement('geometry')
	xparams.appendChild(xparams1)
	# well and barrier thickness (included for legacy reasons)
	jwell = params.layerstack.layer_index('well')
	if jwell is not None and params.kdim <= 2:
		addchild(xroot, xparams1, "l_well", params.layerstack.thicknesses_z[jwell]).setAttribute("unit", "nm")
	jbarr = params.layerstack.layer_index('barrier')
	if jbarr is not None and params.kdim <= 2:
		addchild(xroot, xparams1, "l_barr", params.layerstack.thicknesses_z[jbarr]).setAttribute("unit", "nm")
	else:
		# No single 'barrier' layer: try bottom/top barriers; write one l_barr
		# if both have equal thickness, else two separate values.
		# NOTE(review): this branch is also taken when a 'barrier' layer exists
		# but params.kdim > 2 -- confirm this is intended.
		jbarr1 = params.layerstack.layer_index('barrier_bottom')
		jbarr2 = params.layerstack.layer_index('barrier_top')
		if jbarr1 is not None and jbarr2 is not None:
			lbarr1 = params.layerstack.thicknesses_z[jbarr1]
			lbarr2 = params.layerstack.thicknesses_z[jbarr2]
			if lbarr1 == lbarr2:
				addchild(xroot, xparams1, "l_barr", lbarr1).setAttribute("unit", "nm")
			else:
				addchild(xroot, xparams1, "l_barr1", lbarr1).setAttribute("unit", "nm")
				addchild(xroot, xparams1, "l_barr2", lbarr2).setAttribute("unit", "nm")

	# other quantities
	addchild(xroot, xparams1, "kdim", params.kdim)

	# z (growth) direction discretization; only for 1D/2D geometries
	if params.kdim <= 2:
		addchild(xroot, xparams1, "l_total", params.lz_thick).setAttribute("unit", "nm")
		addchild(xroot, xparams1, "z_resolution", params.zres).setAttribute("unit", "nm")
		addchild(xroot, xparams1, "nz", params.nz)

	# y direction (strip geometry); only for 1D geometries
	if params.kdim <= 1:
		addchild(xroot, xparams1, "width", params.ly_width).setAttribute("unit", "nm")
		addchild(xroot, xparams1, "y_resolution", params.yres).setAttribute("unit", "nm")
		addchild(xroot, xparams1, "ny", params.ny)
		addchild(xroot, xparams1, "y_confinement", params.yconfinement).setAttribute("unit", "meV")
		if isinstance(params.lattice_trans, (int, float, np.integer, np.floating)):
			# scalar lattice_trans is interpreted as a strip angle in degrees
			addchild(xroot, xparams1, "strip_angle", params.lattice_trans).setAttribute("unit", "deg")

	if params.kdim <= 2:
		addchild(xroot, xparams1, "l_interface", params.linterface).setAttribute("unit", "nm")
		addchild(xroot, xparams1, "n_interface", params.dzinterface)

	addchild(xroot, xparams1, "a_lattice", params.a_lattice).setAttribute("unit", "nm")
	# lattice transformation given as a matrix: store the matrix, transformed
	# axes, and the equivalent Euler angles (z, x, z convention)
	if isinstance(params.lattice_trans, np.ndarray):
		xltrans = xroot.createElement('lattice_transformation')
		xparams1.appendChild(xltrans)
		addchild(xroot, xltrans, "matrix", params.lattice_trans)
		for j, row in enumerate(params.lattice_trans):
			addchild(xroot, xltrans, "%saxis" % ("xyz"[j] if j <= 2 else ""), normvec_to_intvec(row))
		xeuler = addchild(xroot, xltrans, "euler_angles", euler_angles_zxz(params.lattice_trans, degrees = True))
		xeuler.setAttribute("unit", "deg")
		xeuler.setAttribute("rotation_axes", "z,x,z")
	## Layer stack (<layerstructure>)
	addchild_layerstack(xroot, xparams, "layerstructure", params.layerstack, substrate_material = params.substrate_material)

	return xparams
+
def xmloptions(xroot, xparent, options, tag = None):
	"""Add a child node with extra option values.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	options  A dict instance, of the form {'option': value}.
	tag      String or None. If set, tag name for the new node. If None, use the
	         default tag name <options>.

	Returns:
	An XML Element. The new node.
	"""
	xopts = xroot.createElement(tag if tag is not None else 'options')
	xparent.appendChild(xopts)

	# Unit attribute for options that carry a physical unit
	unit_lookup = {}
	for key in ('e1shift', 'e1shift_up', 'e1shift_dn', 'split', 'vgate', 'vsurf', 'targetenergy', 'selfcon_accuracy'):
		unit_lookup[key] = "meV"
	for key in ('vsurf_l', 'l_depletion'):
		unit_lookup[key] = "nm"
	for key in ('cardens', 'n_depletion'):
		unit_lookup[key] = "e/nm^2"

	for name, val in options.items():
		if val is True:
			# boolean flag: an empty child node, no value
			addchild(xroot, xopts, name)
		elif val is None or val is False:
			continue  # unset or disabled options are not written
		else:
			xchild = addchild(xroot, xopts, name, val)
			if name in unit_lookup:
				xchild.setAttribute("unit", unit_lookup[name])
	return xopts
+
def xml_setmomentumattribute(xelmnt, k, kuattr = "unit", auattr = "angleunit"):
	"""Set attributes for a node encoding a momentum vector.

	Arguments:
	xelmnt    An XML Element. The node for which to set the attributes.
	k         Vector instance, float, or tuple. The vector value. Tuple is
	          included for legacy reasons and should no longer be used.
	kuattr    String. Attribute that contains the unit (vector magnitude).
	auattr    String. Attribute that contains the angular unit.

	Returns:
	xelmnt    An XML Element. The modified input Element.
	"""
	if isinstance(k, Vector):
		# Attributes taken from Vector.xmlattr; redirect the angle-unit key to
		# the requested attribute name.
		for a, aval in k.xmlattr(prefix = 'k').items():
			xelmnt.setAttribute(auattr if a == 'angleunit' else a, str(aval))
	elif isinstance(k, float):
		xelmnt.setAttribute("kx", str(k))
	elif isinstance(k, tuple) and len(k) == 2:
		kx, ky = k
		xelmnt.setAttribute("kx", str(kx))
		xelmnt.setAttribute("ky", str(ky))
	elif isinstance(k, tuple) and len(k) == 3 and isinstance(k[2], float):
		# cartesian triple (kx, ky, kz)
		for comp, kval in zip(("kx", "ky", "kz"), k):
			xelmnt.setAttribute(comp, str(kval))
	elif isinstance(k, tuple) and len(k) == 3 and k[2] == 'deg':
		# polar pair (k, kphi) with angle in degrees; also store cartesian
		kr, kphi = k[0], k[1]
		xelmnt.setAttribute("k", str(kr))
		xelmnt.setAttribute("kphi", str(kphi))
		xelmnt.setAttribute("kx", str(kr * np.cos(kphi * np.pi / 180.)))
		xelmnt.setAttribute("ky", str(kr * np.sin(kphi * np.pi / 180.)))
		xelmnt.setAttribute(auattr, "deg")
	elif isinstance(k, tuple) and len(k) == 3 and k[2] in ['phi', 'kphi', 'rad']:
		# polar pair (k, kphi) with angle in radians; also store cartesian
		kr, kphi = k[0], k[1]
		xelmnt.setAttribute("k", str(kr))
		xelmnt.setAttribute("kphi", str(kphi))
		xelmnt.setAttribute("kx", str(kr * np.cos(kphi)))
		xelmnt.setAttribute("ky", str(kr * np.sin(kphi)))
		xelmnt.setAttribute(auattr, "rad")
	else:
		raise ValueError("Momentum should be of the form: k; (kx, ky); (k, kphi, angleunit), with angleunit equal to deg, phi, kphi, or rad.")
	xelmnt.setAttribute(kuattr, "1/nm")
	return xelmnt
+
def xml_setmomentumvalue(xroot, xelmnt, k):
	"""Set current node to momentum value (the vector magnitude as text).

	Arguments:
	xroot    An XML Document. The XML root.
	xelmnt   An XML Element. The element whose properties are set to momentum.
	k        Vector instance. The momentum value.

	Returns:
	xelmnt   An XML Element. The modified XML element.
	"""
	if not isinstance(k, Vector):
		raise TypeError("Invalid format for momentum value")
	xelmnt.appendChild(xroot.createTextNode(str(k.len())))
	return xelmnt
+
def xmlvectorgrid(xroot, xparent, vgrid):
	"""Add a child node with vector grid data.
	The tag name is <vectorgrid>.

	Arguments:
	xroot    An XML Document. The XML root.
	xparent  An XML Element. The parent node for the newly created node.
	vgrid    VectorGrid instance.

	Returns:
	An XML Element. The new node.
	"""
	pf = vgrid.prefix if vgrid.prefix is not None else ''
	xvg = xroot.createElement('vectorgrid')
	if pf != '':
		xvg.setAttribute('q', vgrid.prefix)
	xvg.setAttribute('vectortype', vgrid.vtype)
	if vgrid.vtype in ['pol', 'cyl', 'sph']:
		xvg.setAttribute('angleunits', 'deg' if vgrid.degrees else 'rad')

	def component_name(comp):
		# Normalize a component label: an empty label without prefix stands
		# for the radial component 'r'; a trailing 'r' after the prefix is
		# stripped again, so the plain radial component ends up as the prefix
		# (or empty when there is no prefix).
		name = 'r' if comp == '' and pf == '' else pf + comp
		return name[:-1] if name == pf + 'r' else name

	# variable components (the grid axes) with their value arrays
	for v, vvals in zip(vgrid.var, vgrid.values):
		xvar = addchild(xroot, xvg, "variable", vvals)
		xvar.setAttribute("component", component_name(v))
		xvar.setAttribute("n", str(len(vvals)))
	# constant components (held fixed across the grid)
	for c, cval in zip(vgrid.const, vgrid.constvalues):
		xconst = addchild(xroot, xvg, "constant", cval)
		xconst.setAttribute("component", component_name(c))
	xparent.appendChild(xvg)
	return xvg
+
def xmldispersion(xroot, xparent, data, observables = None, sort = True):
	"""Add a child node with dispersion data.

	Arguments:
	xroot        An XML Document. The XML root.
	xparent      An XML Element. The parent node for the newly created node.
	data         DiagData instance.
	observables  List of strings. Legacy parameter. Do not use!
	sort         True or False. If True, put eigenvalues at each data point in
	             ascending order.

	Returns:
	An XML Element. The new node.
	"""
	xdisp = xroot.createElement('dispersion')
	xparent.appendChild(xdisp)
	if data.grid is not None:
		xmlvectorgrid(xroot, xdisp, data.grid)
	for ddp in data:
		# one <momentum> node per data point, momentum as attributes
		xk = xroot.createElement('momentum')
		xdisp.appendChild(xk)
		xml_setmomentumattribute(xk, ddp.k)

		ddp1 = ddp.sort_by_eival() if sort else ddp
		xen = addchild(xroot, xk, "energies", ddp1.eival)
		xen.setAttribute("unit", "meV")

		# band index, LL index, band character; only written when present
		if ddp1.bindex is not None:
			addchild(xroot, xk, "bandindex", ddp1.bindex)
		if ddp1.llindex is not None:
			addchild(xroot, xk, "llindex", ddp1.llindex)
		if ddp1.char is not None:
			addchild(xroot, xk, "characters", ['??' if c.strip() == '' else c for c in ddp1.char])

		# observables; attach id and, when known, unit
		if ddp1.obsvals is not None:
			for j, obsval in enumerate(ddp1.obsvals):
				xobs = addchild(xroot, xk, "observable", obsval)
				if ddp1.obsids is not None:
					obsid = ddp1.obsids[j]
					xobs.setAttribute("q", str(obsid))
					if obsid in all_observables:
						dimful = all_observables.dimful is True
						q_unit = all_observables[obsid].get_unit_str(style = 'raw', dimful = dimful)
						if isinstance(q_unit, str) and len(q_unit) > 0:
							xobs.setAttribute("unit", q_unit)
				elif isinstance(observables, list) and j < len(observables):
					xobs.setAttribute("q", str(observables[j]))  # legacy
	return xdisp
+
def xmldependence(xroot, xparent, data, paramval, paramstr, paramunit = "", observables = None, sort = True, dependentvariables = None):
	"""Add a child node for dependence, for example, of magnetic field.

	Arguments:
	xroot               An XML Document. The XML root.
	xparent             An XML Element. The parent node for the newly created
	                    node.
	data                DiagData instance.
	paramval            List of numerical values. Use the values of this list as
	                    variable/parameter values, unless they are already
	                    defined in data.
	paramstr            String. The variable/parameter, e.g., 'b' for magnetic
	                    field.
	paramunit           String. Unit of the variable/parameter, e.g., 'T' for
	                    tesla if the parameter is magnetic field.
	observables         List of strings. Legacy parameter. Do not use!
	sort                True or False. If True, put eigenvalues at each data
	                    point in ascending order.
	dependentvariables  List of lists. The inner lists must be of length 2 or 3:
	                    either [values, varname] or [values, varname, varunit],
	                    where values is an array the same length as data,
	                    varname is a string, and varunit is a string. These
	                    encode values that depend on the variable/parameter. If
	                    None, assume no such variables. (This is a legacy
	                    option, which might come handy at some point, although
	                    it is somewhat unlikely that it is ever needed.)

	Returns:
	An XML Element. The new node.
	"""
	xdata = xroot.createElement('dependence')
	xdata.setAttribute("variable", paramstr)
	xparent.appendChild(xdata)
	if data.grid is not None:
		xvg = xmlvectorgrid(xroot, xdata, data.grid)
	# one <variabledata> node per data point
	for j, d in enumerate(data):
		xdatak = xroot.createElement("variabledata")
		xdata.appendChild(xdatak)

		# variable value: prefer the value stored in the data point itself,
		# fall back to the externally supplied paramval list
		pval = paramval[j] if d.paramval is None else d.paramval
		if isinstance(pval, Vector):
			# vector-valued parameter: components as attributes
			attr = pval.xmlattr(prefix = paramstr)
			for a in attr:
				xdatak.setAttribute(a, str(attr[a]))
		else:
			xdatak.setAttribute(paramstr, str(pval))
		if len(paramunit) > 0:
			xdatak.setAttribute("vunit", paramunit)
			# NOTE(review): the following call is inside the paramunit check,
			# so momentum attributes are only written when a parameter unit is
			# given; compare xmldispersion, which always writes them. Confirm
			# whether this indentation is intended.
			xdatak = xml_setmomentumattribute(xdatak, d.k, kuattr="kunit")

		d1 = d.sort_by_eival() if sort else d
		xdatak_en = addchild(xroot, xdatak, "energies", d1.eival)
		xdatak_en.setAttribute("unit", "meV")

		# extra per-point values depending on the variable (legacy feature)
		if dependentvariables is not None:
			for depvar in dependentvariables:
				if len(depvar) in [2, 3]:
					xdatak_opt = addchild(xroot, xdatak, depvar[1], str(depvar[0][j]))
					if len(depvar) == 3:
						xdatak_opt.setAttribute("unit", depvar[2])
				else:
					sys.stderr.write("Warning (XMLDependence): Dependent variables must be passed as [[data, varname], ... ] or [[data, varname, unit], ... ]\n")

		# band index, ll index, band character; only written when present
		if d1.bindex is not None:
			addchild(xroot, xdatak, "bandindex", d1.bindex)
		if d1.llindex is not None:
			addchild(xroot, xdatak, "llindex", d1.llindex)
		if d1.char is not None:
			addchild(xroot, xdatak, "characters", ['??' if c.strip() == '' else c for c in d1.char])

		# observables; attach id and, when known, unit
		if d1.obsvals is not None:
			for o in range(0, len(d1.obsvals)):
				xdatak_obs = addchild(xroot, xdatak, "observable", d1.obsvals[o])
				if d1.obsids is not None:
					xdatak_obs.setAttribute("q", str(d1.obsids[o]))
					if d1.obsids[o] in all_observables:
						dimful = all_observables.dimful is True
						q_unit = all_observables[d1.obsids[o]].get_unit_str(style = 'raw', dimful = dimful)
						if isinstance(q_unit, str) and len(q_unit) > 0:
							xdatak_obs.setAttribute("unit", q_unit)
				elif isinstance(observables, list) and o < len(observables):
					xdatak_obs.setAttribute("q", str(observables[o]))  # legacy

	return xdata
+
def writefile(
		filename, data = None, params = None, observables = None, caller = None,
		options = None, dependence = None, dependentoptions = None,
		bands_extrema = None, version_info = None):
	"""Write XML file with header, configuration, parameters, and data.

	Arguments:
	filename          String. The name of the output file.
	data              DiagData instance. Dispersion or dependence data.
	params            PhysParams instance. Physical parameters.
	observables       List of strings or None. Legacy parameter; passed through
	                  to xmldispersion()/xmldependence().
	caller            String. Filename of the executable script from which this
	                  function is called.
	options           Dict instance. Extra options.
	dependence        None or list of length 2 or 3. If None, assume the data is
	                  a dispersion. If a list, it must be of the form
	                  [values, paramstr] or [values, paramstr, paramunit], where
	                  value is an array the same length as data, paramstr is a
	                  string, and paramunit is a string.
	dependentoptions  Dict instance, whose keys are option keys (strings) and
	                  whose values are lists of the same length as data.
	bands_extrema     Dict instance, whose keys are band labels and whose values
	                  are lists of BandExtremum instances.
	version_info      String. Version info.

	Note:
	All arguments may be None, which means that the information in question will
	either not appear in the output file, or its value will be determined
	automatically.

	No return value.
	"""
	# create minidom document with root element <datafile>
	xr = xm.Document()
	xrn = xr.createElement('datafile')
	xr.appendChild(xrn)

	# <info> header element
	xmlheader(xr, xrn, caller = caller, version_info = version_info)

	# <configuration> element
	xmlconfig(xr, xrn)

	# <parameters> element
	if params is not None:
		xmlparams(xr, xrn, params)

	# <options> element; also collect "dependent option" values, i.e., option
	# values given as lists with one entry per data point
	dependentvariables = []
	if options is not None:
		xmloptions(xr, xrn, options)
		# guard on data as well: the length comparison below requires it
		if dependentoptions is not None and data is not None:
			for depopt in dependentoptions:
				opt_val = options.get(depopt)
				if isinstance(opt_val, list) and len(opt_val) == len(data):
					depvar = [opt_val, depopt]
					if depopt in ['e1shift', 'e1shift_up', 'e1shift_dn']:
						depvar.append("meV")  # energy shift options carry a unit
					dependentvariables.append(depvar)
	if not dependentvariables:
		dependentvariables = None

	# <extrema> element
	if bands_extrema is not None:
		addchild_bands_extrema(xr, xrn, 'extrema', bands_extrema)

	# data element: <dispersion> (no dependence) or <dependence>
	if data is not None:
		if dependence is None:
			xmldispersion(xr, xrn, data, observables = observables)
		elif len(dependence) in (2, 3) and len(dependence[0]) == len(data):
			paramunit = dependence[2] if len(dependence) == 3 else ""
			xmldependence(
				xr, xrn, data, dependence[0], dependence[1], paramunit,
				observables = observables, dependentvariables = dependentvariables)
		else:
			sys.stderr.write("Warning: Combination of data and dependence is not correct. No data written.\n")

	# Context manager guarantees the file handle is closed even if
	# toprettyxml() or the write fails (the original open/write/close did not).
	with open(filename, "w", encoding = 'utf-8') as f:
		f.write(xr.toprettyxml(indent = '\t'))