Publish
This commit is contained in:
commit
b7053ad014
244
.gitignore
vendored
Normal file
244
.gitignore
vendored
Normal file
|
@ -0,0 +1,244 @@
|
|||
|
||||
# Created by https://www.gitignore.io/api/python,pycharm,pycharm+all,pycharm+iml
|
||||
|
||||
### PyCharm ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff:
|
||||
.idea/**/workspace.xml
|
||||
.idea/**/tasks.xml
|
||||
.idea/dictionaries
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.xml
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
|
||||
# Gradle:
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# CMake
|
||||
cmake-build-debug/
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# Ruby plugin and RubyMine
|
||||
/.rakeTasks
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
### PyCharm Patch ###
|
||||
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
|
||||
|
||||
# *.iml
|
||||
# modules.xml
|
||||
# .idea/misc.xml
|
||||
# *.ipr
|
||||
|
||||
# Sonarlint plugin
|
||||
.idea/sonarlint
|
||||
|
||||
### PyCharm+all ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff:
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
|
||||
# Gradle:
|
||||
|
||||
# CMake
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
|
||||
## File-based project format:
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
|
||||
# JIRA plugin
|
||||
|
||||
# Cursive Clojure plugin
|
||||
|
||||
# Ruby plugin and RubyMine
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
|
||||
### PyCharm+all Patch ###
|
||||
# Ignores the whole idea folder
|
||||
# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
|
||||
|
||||
.idea/
|
||||
|
||||
### PyCharm+iml ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff:
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
|
||||
# Gradle:
|
||||
|
||||
# CMake
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
|
||||
## File-based project format:
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
|
||||
# JIRA plugin
|
||||
|
||||
# Cursive Clojure plugin
|
||||
|
||||
# Ruby plugin and RubyMine
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
|
||||
### PyCharm+iml Patch ###
|
||||
# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
|
||||
|
||||
*.iml
|
||||
modules.xml
|
||||
.idea/misc.xml
|
||||
*.ipr
|
||||
|
||||
### Python ###
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
.pytest_cache/
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
.hypothesis/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# celery beat schedule file
|
||||
celerybeat-schedule.*
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
|
||||
|
||||
# End of https://www.gitignore.io/api/python,pycharm,pycharm+all,pycharm+iml
|
674
LICENSE.txt
Normal file
674
LICENSE.txt
Normal file
|
@ -0,0 +1,674 @@
|
|||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
97
README.md
Normal file
97
README.md
Normal file
|
@ -0,0 +1,97 @@
|
|||
# Aucoin: *A distributed cryptocurrency*
|
||||
|
||||
## Quick Start
|
||||
### Installation
|
||||
The easiest way to install Aucoin is to use pip:
|
||||
|
||||
```bash
|
||||
pip3 install --upgrade https://git.caspervk.net/caspervk/aucoin/archive/master.tar.gz
|
||||
```
|
||||
|
||||
**Aucoin requires Python 3.6 or later**. Python 3.6 is available in many distributions but you may need to build it on Debian. See the *Detailed Install Instructions* section for further information.
|
||||
|
||||
### Usage
|
||||
The program can be started by running `aucoin` or `python3 -m aucoin` depending on system configuration. Aucoin uses TCP port 8334 to communicate with other nodes; you may need to open it in your firewall.
|
||||
|
||||
```text
|
||||
Usage: aucoin [OPTIONS]
|
||||
|
||||
Options:
|
||||
-m, --miners INTEGER Number of mining processors. [default: 1]
|
||||
-p, --max-peers INTEGER Maximum number of network peers. [default: 100]
|
||||
-i, --interface TEXT Network interface to bind to. [default: 0.0.0.0]
|
||||
-s, --seed TEXT Nodes to connect to. Overrides DNS seeds and saved
|
||||
peer database. Can be specified multiple times.
|
||||
-v, --verbose Increase verbosity. Can be used multiple times.
|
||||
--no-catch-up Skip catching up to the rest of the network before
|
||||
starting miner and CLI.
|
||||
--fast-unsafe-catch-up Catch up much faster by downloading the blockchain
|
||||
database from central server (aucoin.network).
|
||||
--statistics Log statistics to .aucoin/statistics/stats.json.
|
||||
--clean Remove data directory (blockchain, wallet, etc).
|
||||
--help Show this message and exit.
|
||||
```
|
||||
|
||||
### Updating
|
||||
Update by issuing the same command as with installation.
|
||||
|
||||
## Screenshots
|
||||
### Node status
|
||||
![Node status](images/status.png)
|
||||
### Transaction history
|
||||
![Transaction history](images/history.png)
|
||||
### Catching up
|
||||
![Catching up](images/catchup.png)
|
||||
|
||||
## Detailed Install Instructions
|
||||
### Building Python 3.6
|
||||
The following will build and install Python 3.6 on Debian Stretch:
|
||||
|
||||
```bash
|
||||
apt install build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev liblzma-dev libgdbm-dev tk-dev
|
||||
wget https://www.python.org/ftp/python/3.6.4/Python-3.6.4.tgz
|
||||
tar xf Python-3.6.4.tgz
|
||||
cd Python-3.6.4
|
||||
./configure --enable-optimizations
|
||||
make
|
||||
make altinstall # https://docs.python.org/3/using/unix.html#building-python
|
||||
```
|
||||
|
||||
Replace `python3` and `pip3` with `python3.6` and `pip3.6`, respectively, throughout this document if Python 3.6 was built manually.
|
||||
|
||||
### Development Setup
|
||||
To get started developing on Aucoin, it is recommended to install the package from git. The following will install Aucoin along with the additional development dependencies (optionally in a virtual environment):
|
||||
|
||||
```bash
|
||||
git clone git@git.caspervk.net:caspervk/aucoin.git
|
||||
cd aucoin
|
||||
python3 -m venv venv # optional
|
||||
. venv/bin/activate # optional
|
||||
pip3 install --editable .[dev]
|
||||
```
|
||||
|
||||
Execute `git pull` to update from upstream.
|
||||
|
||||
#### Building Package
|
||||
To build wheels for the project, first install the `wheel` package:
|
||||
|
||||
```bash
|
||||
pip3 install wheel
|
||||
```
|
||||
|
||||
To build the wheel:
|
||||
|
||||
```bash
|
||||
python3 setup.py bdist_wheel
|
||||
```
|
||||
|
||||
More information on how to package and distribute projects [here](https://packaging.python.org/tutorials/distributing-packages/#packaging-your-project).
|
||||
|
||||
## Seed Node
|
||||
The only centralised components of the system are the seed nodes; these are the nodes that new clients connect to in order to bootstrap the network. By default, clients retrieve the list of seed-node IP addresses through DNS at the hostname seed.aucoin.network. To support multiple seed nodes, the zone should be configured as follows:
|
||||
|
||||
```zone
|
||||
seed IN A <seed address>
|
||||
IN A <seed address>
|
||||
...
|
||||
```
|
BIN
aucoin.pdf
Normal file
BIN
aucoin.pdf
Normal file
Binary file not shown.
3
aucoin/__init__.py
Normal file
3
aucoin/__init__.py
Normal file
|
@ -0,0 +1,3 @@
|
|||
__version__ = "0.0.1"
|
||||
__author__ = "Casper V. Kristensen & Magnus Meng Mortensen"
|
||||
__licence__ = "GPLv3"
|
11
aucoin/__main__.py
Normal file
11
aucoin/__main__.py
Normal file
|
@ -0,0 +1,11 @@
|
|||
"""
|
||||
The main terminal-based entry point. Invoke as `aucoin' or `python3 -m aucoin'.
|
||||
"""
|
||||
import sys
|
||||
|
||||
if __name__ == '__main__':
|
||||
if not sys.version_info >= (3, 6):
|
||||
exit("Aucoin requires python 3.6 or above")
|
||||
|
||||
from aucoin.main import main
|
||||
main()
|
185
aucoin/block.py
Normal file
185
aucoin/block.py
Normal file
|
@ -0,0 +1,185 @@
|
|||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import List, Union
|
||||
|
||||
from sqlalchemy import Boolean, Text
|
||||
from sqlalchemy import Column, Integer, LargeBinary
|
||||
from sqlalchemy.ext.orderinglist import ordering_list
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from aucoin import database
|
||||
from aucoin import util
|
||||
from aucoin.transactions import Transaction, CoinbaseTransaction
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Block(database.DBBase):
    """
    One block of the chain: a signed header plus an ordered list of
    transactions, persisted through SQLAlchemy in the "blocks" table.
    """
    __tablename__ = "blocks"

    # Directly mapped to python attributes:
    version = Column(Integer)
    hash_prev_block = Column(LargeBinary)
    merkle_root_hash = Column(LargeBinary)
    timestamp = Column(Integer)
    target = Column(LargeBinary)
    signature = Column(LargeBinary)
    public_key = Column(LargeBinary)
    errors = Column(Text, default='[]')  # JSON-encoded list of recorded errors

    # SQLAlchemy-internal stuff (derived values cached so they can be queried):
    _id = Column(Integer, primary_key=True, autoincrement=True)
    _hash = Column(LargeBinary)
    _difficulty_sum = Column(Integer)
    _block_height = Column(Integer)
    _main_branch = Column(Boolean, default=False)

    # Relationships:
    transactions = relationship("Transaction",
                                back_populates="block",
                                order_by="Transaction._index",
                                collection_class=ordering_list("_index"))

    def __init__(self, version=1, hash_prev_block: bytes = b"", merkle_root_hash: bytes = None,
                 timestamp: int = None, target: bytes = b"", signature: bytes = b"",
                 public_key: bytes = b"", transactions: List[Union[Transaction, CoinbaseTransaction]] = None):
        """
        :param version: Indicates which validations rules to follow. Used for backwards-compatibility breaking updates to the protocol.
        :param hash_prev_block: Hash of the previous block.
        :param merkle_root_hash: Merkle root hash of transactions. Calculated from the transactions if not given.
        :param timestamp: Unix epoch timestamp of when the miner started mining the block. Defaults to the current time.
        :param target: Target threshold (not difficulty). The block's hash must be less than or equal to this.
        :param signature: Signature of the other header fields, validated with the public key receiving payments in the
                          coinbase. Also used as a nonce to modify the hash in the mining process.
        :param public_key: The public key of the address in the coinbase. Used to verify the signature.
        :param transactions: List of transactions (MUST be same order as first row of merkle tree).
        """
        ### HEADER ###
        self.version = version
        self.hash_prev_block = hash_prev_block
        self.merkle_root_hash = merkle_root_hash
        # Bugfix: the default used to be `timestamp=int(datetime.utcnow().timestamp())`
        # in the signature, which is evaluated only ONCE at class-definition time, so
        # every block created without an explicit timestamp shared the same stale time.
        # Evaluate per-instance instead (backward compatible for explicit callers).
        self.timestamp = int(datetime.utcnow().timestamp()) if timestamp is None else timestamp
        self.target = target
        self.signature = signature
        self.public_key = public_key
        ### END ###

        ### BODY ###
        # Fall back to an empty list: assigning None to the relationship collection
        # would make the .reorder() call below fail.
        self.transactions = transactions if transactions is not None else []
        ### END ###

        # Synchronize transaction ordering for ordering_list collection
        self.transactions.reorder()

        # Calculate merkle root hash if not set manually
        if self.transactions and not self.merkle_root_hash:
            self.calculate_merkle()

    @property
    def header(self) -> bytes:
        """Serialized block header: all header fields concatenated, integers big-endian."""
        return b"".join((
            self.version.to_bytes(4, "big"),
            self.hash_prev_block,
            self.merkle_root_hash,
            self.timestamp.to_bytes(8, "big"),
            self.target,
            self.signature,
            self.public_key,
        ))

    @property
    def truncated_header(self) -> bytes:
        """
        Same as the header, but with the signature truncated. Used when signing and validating the block signature.
        """
        return b"".join((
            self.version.to_bytes(4, "big"),
            self.hash_prev_block,
            self.merkle_root_hash,
            self.timestamp.to_bytes(8, "big"),
            self.target,
            self.public_key,
        ))

    @property
    def hash(self) -> bytes:
        """Digest of the serialized header (digest algorithm defined by util.hash)."""
        return util.hash(self.header)

    @property
    def size(self) -> int:
        """
        :return: Block size in bytes (header plus all transactions).
        """
        return len(self.header) + sum(tx.size for tx in self.transactions)

    @property
    def height(self) -> int:
        """Block height as recorded on the first (coinbase) transaction."""
        return self.transactions[0].block_height

    @property
    def difficulty(self) -> int:
        """
        Used to calculate _difficulty_sum, which in turn is used when comparing two chains. Each block counts as
        (2^256 / target); this is the expected/average number of attempts that were necessary to create it.

        Based on:
            https://bitcoin.stackexchange.com/questions/936/939#939
            https://bitcoin.stackexchange.com/questions/29742/

        :return: The difficulty of the block.
        """
        return 2**256 // int.from_bytes(self.target, "big")

    def calculate_merkle(self, update=True) -> bytes:
        """
        Calculate merkle root hash based on block's transactions.

        :param update: Updates self.merkle_root_hash if set to True.
        :return: The merkle root hash of the transactions.
        """
        merkle_root_hash = util.merkle_root_hash([tx.hash for tx in self.transactions])

        if update:
            self.merkle_root_hash = merkle_root_hash

        return merkle_root_hash

    @property
    def raw(self) -> dict:
        """JSON-serializable dict of the block with all binary fields hex-encoded."""
        return {
            "hash": self.hash.hex(),
            "version": self.version,
            "hash_prev_block": self.hash_prev_block.hex(),
            "merkle_root_hash": self.merkle_root_hash.hex(),
            "timestamp": self.timestamp,
            "target": self.target.hex(),
            "signature": self.signature.hex(),
            "public_key": self.public_key.hex(),
            "transactions": [tx.raw for tx in self.transactions]
        }

    @staticmethod
    def from_raw(version=None, hash_prev_block=None, merkle_root_hash=None, timestamp=None,
                 target=None, signature=None, public_key=None, transactions=None, hash=None):
        """
        Inverse of `raw`: rebuild a Block from hex-encoded fields. The first raw
        transaction is treated as the coinbase. `hash` is accepted for symmetry
        with `raw` but ignored — the hash is always recomputed from the header.
        """
        return Block(
            version=version,
            hash_prev_block=bytes.fromhex(hash_prev_block),
            merkle_root_hash=bytes.fromhex(merkle_root_hash),
            timestamp=timestamp,
            target=bytes.fromhex(target),
            signature=bytes.fromhex(signature),
            public_key=bytes.fromhex(public_key),
            transactions=[CoinbaseTransaction.from_raw(**transactions[0])] +
                         [Transaction.from_raw(**tx) for tx in transactions[1:]]
        )

    def json(self, indent=None, internal=False) -> str:
        """
        :param indent: Passed through to json.dumps.
        :param internal: Also include the SQLAlchemy-internal cached columns.
        :return: JSON representation of the block.
        """
        if internal:
            return json.dumps({
                **self.raw,
                **{"_difficulty_sum": self._difficulty_sum,
                   "_block_height": self._block_height,
                   "_main_branch": self._main_branch}},
                indent=indent)

        return json.dumps(self.raw, indent=indent)

    def __str__(self):
        # Python always calls __str__ with no extra arguments; the old
        # (*args, **kwargs) signature was dead weight.
        return f"Block({self.json(indent=4, internal=True)})"
|
469
aucoin/blockchain.py
Normal file
469
aucoin/blockchain.py
Normal file
|
@ -0,0 +1,469 @@
|
|||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from statistics import median_low
|
||||
from typing import Optional, Iterable, Set
|
||||
|
||||
from sqlalchemy import Column, bindparam, true, false
|
||||
from sqlalchemy import Integer
|
||||
from sqlalchemy import LargeBinary
|
||||
from sqlalchemy import func
|
||||
|
||||
from aucoin import consensus
|
||||
from aucoin import database
|
||||
from aucoin.block import Block
|
||||
from aucoin.database import session_scope, bakery
|
||||
from aucoin.transactions import Output, CoinbaseTransaction, Transaction
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Header(database.DBBase):
    # Single-row table (fixed primary key 0) persisting the hash of the
    # blockchain's current header (tip) block between runs; read/written by
    # Blockchain.header / Blockchain.set_header.
    # NOTE: the column name `hash` shadows the builtin, but it is part of the
    # mapped schema and must keep its name.
    __tablename__ = "header"
    _id = Column(Integer, default=0, primary_key=True)
    hash = Column(LargeBinary)

    def __init__(self, hash):
        # `hash` is the raw bytes digest of the tip block (Block._hash).
        self.hash = hash
|
||||
|
||||
|
||||
# noinspection PyProtectedMember
class Blockchain(object):
    """Facade over the block database: lookups, statistics and branch bookkeeping."""

    def __init__(self, reset=False):
        # Orphan blocks (parent not yet known) are kept in memory, not in the DB.
        self.orphans = {}

        database.DBBase.metadata.create_all(database.engine)

        # Reset the database when explicitly requested, or if it is still empty.
        with session_scope() as session:
            first_block = session.query(Block).first()
            if reset or first_block is None:
                self.reset(session)
|
||||
|
||||
@property
|
||||
def size(self) -> int:
|
||||
"""
|
||||
:return: Size of the blockchain database file in bytes.
|
||||
"""
|
||||
return database.path.stat().st_size
|
||||
|
||||
@classmethod
|
||||
def header(cls, session) -> Block:
|
||||
"""
|
||||
:param session: Database session.
|
||||
:return: The blockchain's header block.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(Header))
|
||||
block_hash = baked(session).first().hash
|
||||
|
||||
return cls.block(block_hash, session)
|
||||
|
||||
@classmethod
|
||||
def set_header(cls, block: Block, session):
|
||||
"""
|
||||
Update the blockchain's header.
|
||||
|
||||
:param block: The blockchain's new header block.
|
||||
:param session: Database session.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(Header))
|
||||
baked(session).first().hash = block._hash
|
||||
|
||||
def add(self, block: Block, session, main_branch=False):
|
||||
"""
|
||||
Adds a block to the database. Only the validator should call this function.
|
||||
|
||||
:param block: The block to add to the database
|
||||
:param session: Database session.
|
||||
:param main_branch: Should the block be regarded as part of the main branch?
|
||||
"""
|
||||
prev_block = self.block(block.hash_prev_block, session)
|
||||
|
||||
# Calculate SQLAlchemy-internal stuff
|
||||
block._hash = block.hash
|
||||
block._difficulty_sum = prev_block._difficulty_sum + block.difficulty
|
||||
block._block_height = prev_block.height + 1
|
||||
block._main_branch = main_branch
|
||||
|
||||
for transaction in block.transactions:
|
||||
transaction._hash = transaction.hash
|
||||
|
||||
session.merge(block)
|
||||
|
||||
@classmethod
|
||||
def block(cls, block_hash, session) -> Optional[Block]:
|
||||
"""
|
||||
Retrieve a block based on its hash.
|
||||
|
||||
:param block_hash: The hash of the block to retrieve from the database.
|
||||
:param session: Database session.
|
||||
:return: Block matching the given block_hash if it exists in the database. None otherwise.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(Block))
|
||||
baked += lambda q: q.filter(Block._hash == bindparam("block_hash"))
|
||||
|
||||
return baked(session).params(block_hash=block_hash).first()
|
||||
|
||||
@classmethod
|
||||
def block_at_height(cls, height, session) -> Optional[Block]:
|
||||
"""
|
||||
Retrieve a block from the main branch based on its height.
|
||||
|
||||
:param height: The height of the block to retrieve from the database.
|
||||
:param session: Database session.
|
||||
:return: Block from the main branch matching given height if it exists in the database. None otherwise.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(Block))
|
||||
baked += lambda q: q.filter(Block._main_branch == true())
|
||||
baked += lambda q: q.filter(Block._block_height == bindparam("height"))
|
||||
|
||||
return baked(session).params(height=height).first()
|
||||
|
||||
@classmethod
|
||||
def block_most_work(cls, session) -> Block:
|
||||
"""
|
||||
Find the block with the greatest difficulty sum; this block is the header of the chain with the greatest total
|
||||
expected/average number of attempts that were necessary to create it.
|
||||
|
||||
:param session: Database session.
|
||||
:return: Block with most work.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(func.max(Block._difficulty_sum)))
|
||||
max_difficulty_sum = baked(session).scalar()
|
||||
|
||||
baked = bakery(lambda s: s.query(Block))
|
||||
baked += lambda q: q.filter(Block._difficulty_sum == bindparam("max_difficulty_sum"))
|
||||
|
||||
return baked(session).params(max_difficulty_sum=max_difficulty_sum).first()
|
||||
|
||||
@classmethod
|
||||
def number_of_blocks(cls, session, main_branch_only=False) -> int:
|
||||
"""
|
||||
Count the number of blocks in the database.
|
||||
|
||||
:param session: Database session.
|
||||
:param main_branch_only: If True, only count blocks in the main branch.
|
||||
:return: Number of blocks in the blockchain.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(func.count(Block._id)))
|
||||
|
||||
if main_branch_only:
|
||||
baked += lambda q: q.filter(Block._main_branch == true())
|
||||
|
||||
return baked(session).scalar()
|
||||
|
||||
@classmethod
|
||||
def average_block_timespan(cls, session, n=None) -> int:
|
||||
"""
|
||||
Average timespan of blocks in the main branch since genesis if no n is provided, otherwise average is over the
|
||||
last n blocks.
|
||||
|
||||
:param session: Database session.
|
||||
:param n: Number of blocks to calculate average over.
|
||||
:return: The average timespan.
|
||||
"""
|
||||
header = cls.header(session)
|
||||
|
||||
# If header is genesis there are no timespans yet
|
||||
if header.height == 0:
|
||||
return 0
|
||||
|
||||
# Average is since genesis if no n is specified or if there are fewer than n blocks in the main branch
|
||||
if n is None or header.height - n < 0:
|
||||
block = cls.genesis_block(session)
|
||||
else:
|
||||
block = cls.block_at_height(header.height - n, session)
|
||||
|
||||
return (header.timestamp - block.timestamp) / (header.height - block.height)
|
||||
|
||||
@classmethod
|
||||
def transaction(cls, transaction_hash, session) -> Optional[Transaction]:
|
||||
"""
|
||||
Retrieve a transaction from the main branch based on its hash.
|
||||
|
||||
:param transaction_hash: The hash of the transaction to retrieve from the database.
|
||||
:param session: Database session.
|
||||
:return: Transaction matching the given transaction_hash if it exists in the main branch. None otherwise.
|
||||
"""
|
||||
baked = bakery(lambda s: s.query(Transaction))
|
||||
baked += lambda q: q.join(Transaction.block)
|
||||
baked += lambda q: q.filter(Transaction._hash == bindparam("transaction_hash"))
|
||||
baked += lambda q: q.filter(Block._main_branch == true())
|
||||
|
||||
return baked(session).params(transaction_hash=transaction_hash).first()
|
||||
|
||||
@classmethod
def txo(cls, transaction_hash, index, session) -> Optional[Output]:
    """
    Retrieve a transaction output from the main branch based on its containing transaction's hash and its index.

    :param transaction_hash: The hash of the transaction containing the desired output.
    :param index: The index of the output in the transaction.
    :param session: Database session.
    :return: Output if it exists. None otherwise.
    """
    # Baked (cached) query: Output -> Transaction -> Block, restricted to the main branch.
    baked = bakery(lambda s: s.query(Output))
    baked += lambda q: q.join(Output.transaction)
    baked += lambda q: q.join(Transaction.block)
    baked += lambda q: q.filter(Output._index == bindparam("index"))
    baked += lambda q: q.filter(Transaction._hash == bindparam("transaction_hash"))
    baked += lambda q: q.filter(Block._main_branch == true())

    return baked(session).params(transaction_hash=transaction_hash, index=index).first()
@classmethod
def utxo(cls, transaction_hash, index, session) -> Optional[Output]:
    """
    Retrieve an unspent transaction output from the main branch based on its containing transaction's hash and its
    index.

    :param transaction_hash: The hash of the transaction containing the desired output.
    :param index: The index of the output in the transaction.
    :param session: Database session.
    :return: Output if it exists and is unspent. None otherwise.
    """
    candidate = cls.txo(transaction_hash, index, session)

    # Only hand the output back when it exists and has not been spent yet.
    if candidate is not None and not candidate._spent:
        return candidate

    return None
@classmethod
def txos_of_addresses(cls, addresses, session, unspent_only=False, limit=None) -> Iterable[Output]:
    """
    Retrieve iterable of transaction outputs which were sent to any of the given addresses.

    :param addresses: Collection of addresses.
    :param session: Database session.
    :param unspent_only: Only return unspent outputs (UTXOs).
    :param limit: Limit number of returned outputs. Newer outputs are returned before older ones.
    :return: Potentially empty iterable of transaction outputs.
    """
    # Baked query over Output -> Transaction -> Block, main branch only.
    baked = bakery(lambda s: s.query(Output))
    baked += lambda q: q.join(Output.transaction)
    baked += lambda q: q.join(Transaction.block)
    baked += lambda q: q.filter(Output.address.in_(bindparam("addresses", expanding=True)))
    baked += lambda q: q.filter(Block._main_branch == true())

    if unspent_only:
        baked += lambda q: q.filter(Output._spent == false())

    # Ordering is only applied together with a limit, so "newest first" only holds when limit is set.
    if limit is not None:
        baked += lambda q: q.order_by(Block._block_height.desc())
        baked += lambda q: q.limit(bindparam("limit"))

    # limit is always passed as a param; it is simply unused when no limit step was baked in.
    return baked(session).params(addresses=list(addresses), limit=limit)
@classmethod
def utxos_of_addresses(cls, addresses, session) -> Iterable[Output]:
    """
    Retrieve list of unspent transaction outputs which were sent to any of the given addresses.

    Thin convenience wrapper around :meth:`txos_of_addresses` with unspent_only=True.

    :param addresses: Collection of addresses.
    :param session: Database session.
    :return: Potentially empty list of unspent transaction outputs.
    """
    return cls.txos_of_addresses(addresses, session, unspent_only=True)
@classmethod
def blocks_ahead(cls, block_hash, session, limit=100) -> Iterable[Block]:
    """
    Retrieve list of blocks ahead of given block_hash in main branch, EXCLUDING the block matching block_hash.
    If block_hash doesn't exist in main branch, return blocks ahead of genesis.

    :param block_hash: The hash of the block to retrieve blocks ahead for.
    :param session: Database session.
    :param limit: Limit length of list to this many blocks.
    :return: Iterable of blocks ahead.
    """
    # Find height of the block matching block_hash. If it doesn't exist in main branch genesis is the first ahead
    block = cls.block(block_hash, session)
    if block is not None and block._main_branch:
        height = block._block_height
    else:
        height = 0

    # Return up to limit blocks ahead in main branch, ordered oldest-first.
    baked = bakery(lambda s: s.query(Block))
    baked += lambda q: q.filter(Block._main_branch == true())
    baked += lambda q: q.filter(Block._block_height.between(bindparam("height") + 1,  # + 1 excludes given block
                                                            bindparam("height") + bindparam("limit")))
    baked += lambda q: q.order_by(Block._block_height.asc())

    return baked(session).params(height=height, limit=limit)
@classmethod
def blocks_behind(cls, block_hash, session, limit=None) -> Iterable[Block]:
    """
    Retrieve iterable of blocks behind given block hash, INCLUDING the block matching block_hash.

    Generator: walks the side branch block-by-block, then switches to a single
    SQL range query once the walk reaches the main branch.

    :param block_hash: The hash of the block to retrieve blocks behind for.
    :param session: Database session.
    :param limit: Limit number of returned blocks to this many elements.
    :return: An iterable of blocks behind given block_hash.
    """
    # Follow chain of hash_prev_block's until we hit the limit, genesis, or a block from the main branch.
    # NOTE(review): assumes block_hash refers to a block that exists; if cls.block() returned None the
    # attribute accesses below would raise AttributeError — confirm callers guarantee existence.
    count = 0
    while True:
        block = cls.block(block_hash, session)
        yield block
        count += 1

        # Job's done if we reached either limit or genesis (prev-hash of 32 zero bytes)
        if count == limit or block.hash_prev_block == bytes(32):
            return

        # Break to SQL-optimised algorithm if we hit the main branch
        if block._main_branch:
            break

        block_hash = block.hash_prev_block

    # Each block height is unique within the main branch, so once we hit it we can utilise SQL to retrieve the rest
    # of the blocks faster.
    baked = bakery(lambda s: s.query(Block))
    baked += lambda q: q.filter(Block._main_branch == true())
    baked += lambda q: q.filter(Block._block_height < bindparam("height"))
    baked += lambda q: q.order_by(Block._block_height.desc())

    if limit:
        baked += lambda q: q.limit(limit - count)
    else:
        # Don't prefetch the full result-set if no limit since there's no telling how big that is
        baked += lambda q: q.yield_per(1)

    yield from baked(session).params(height=block._block_height)
@classmethod
def median_timestamp(cls, block_hash, session, n=consensus.block_median_timestamp_nblocks) -> int:
    """
    Retrieve the median timestamp (not timespan!) of the last n blocks, INCLUDING the block matching block_hash.

    :param block_hash: The hash of the block to retrieve
    :param session: Database session.
    :param n: The number of blocks behind given block_hash to include.
    :return: The median timestamp of the last n blocks. When n is odd, the timestamp of the middle block is returned.
             When it is even, the smaller of the two middle values is returned (statistics.median_low semantics).
    """
    return median_low(block.timestamp for block in cls.blocks_behind(block_hash, session, limit=n))
@classmethod
def find_fork(cls, block_hash, session) -> Block:
    """
    Find the block from which the branch with the block matching given block_hash forks of the main branch.

    :param block_hash: Block hash matching a side-branch block.
    :param session: Database session.
    :return The fork block. If block matching block_hash is in the main branch, that block itself is returned.
    """
    # blocks_behind() yields the block itself first, so a main-branch block is its own "fork point".
    return next(block for block in cls.blocks_behind(block_hash, session) if block._main_branch)
@classmethod
def known_addresses(cls, session) -> Set[bytes]:
    """
    Find set of addresses who have ever received or mined coins. Useful for stress-testing the network by sending
    transactions to random addresses.

    :param session: Database session.
    :return: Set of addresses.
    """
    baked = bakery(lambda s: s.query(Output.address))

    # SQLAlchemy yields single-column rows as 1-tuples; unpack them into a flat set.
    return {address for (address,) in baked(session)}
@classmethod
def prune(cls, session):
    """
    Delete all side-branch blocks with a height 1000 or more from the current header.

    :param session: Database session.
    :return: Number of rows (blocks) deleted, as returned by Query.delete().
    """
    return session.query(Block). \
        filter(Block._main_branch == false()). \
        filter(Block._block_height < cls.header(session).height - 1000). \
        delete()
@classmethod
def catching_up_progress(cls, session) -> float:
    """
    Very simple heuristic for determining if we're currently catching up to the rest of the network, and if we are,
    what fraction of the blockchain we have processed so far.

    :param session: Database session.
    :return: Fraction of the blockchain we have processed so far in the interval [0;1]. Returns 1 if fully caught up.
    """
    header_block = cls.header(session)

    # Return 1 ("fully caught up") if header's timestamp is within 10 minutes of the current time, since inaccuracy
    # of time synchronisation between nodes will make the calculation inaccurate at that point.
    # Bug fix: block timestamps are UTC epoch seconds, so they must be converted with utcfromtimestamp();
    # fromtimestamp() applies the local timezone and skewed the comparison against utcnow().
    if datetime.utcfromtimestamp(header_block.timestamp) > datetime.utcnow() - timedelta(minutes=10):
        return 1.0

    # Otherwise return estimate of the fraction of the blockchain we have processed so far

    genesis_block = cls.genesis_block(session)

    # Normalise header block's timestamp and current time according the timestamp of the genesis block.
    # Bug fix: utcnow().timestamp() interprets the naive UTC datetime as local time; compute the UTC epoch
    # explicitly from the epoch origin instead.
    normalised_header_time = header_block.timestamp - genesis_block.timestamp
    now_epoch = int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds())
    normalised_current_time = now_epoch - genesis_block.timestamp

    # Guard against division by zero right after genesis (current time == genesis timestamp).
    if normalised_current_time <= 0:
        return 1.0

    return normalised_header_time / normalised_current_time
@classmethod
def genesis_block(cls, session) -> Block:
    """
    Retrieve the genesis block.

    :param session: Database session.
    :return: Genesis block (identified by an all-zero previous-block hash).
    """
    baked = bakery(lambda s: s.query(Block))
    baked += lambda q: q.filter(Block.hash_prev_block == bytes(32))

    return baked(session).first()
@classmethod
def reset(cls, session):
    """
    Reset the blockchain, deleting everything but the genesis block.

    Drops and recreates every table, then re-inserts the hard-coded genesis block
    together with its header row.

    :param session: Database session.
    """
    logger.warning("Resetting blockchain")

    # Clear tables
    database.DBBase.metadata.drop_all(database.engine)
    database.DBBase.metadata.create_all(database.engine)

    # Re-add genesis block. Note that the signature doesn't match the address of the coinbase (0x00..00), this makes
    # the genesis block invalid according to the validation rules. This, however, doesn't matter because the genesis
    # block is never actually validated.
    # The coinbase contains the latest block hash of the Bitcoin network, proving that we didn't pre-mine Aucoin.
    genesis = Block(
        version=1,
        hash_prev_block=bytes(32),  # 0x00...00
        timestamp=1527607766,  # TODO: insert result of int(datetime.utcnow().timestamp())
        target=bytes.fromhex("00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),  # TODO
        transactions=[
            CoinbaseTransaction(
                version=1,
                address=bytes(32),  # 0x00...00: no one will receive these coins
                value=consensus.block_reward,
                block_height=0,
                coinbase=bytes(
                    "Bitcoin Mainnet @ 524988 = 0x0000000000000000002eab043a0041cd5147519c27e0202b420293457f073dab",  # TODO
                    encoding="ascii"
                )
            )
        ]
    )

    # These attributes are normally calculated and set by the add()-function, but since the genesis is quite special
    # our code has to be a little wet (not DRY).
    genesis._hash = genesis.hash
    genesis._difficulty_sum = genesis.difficulty
    genesis._block_height = 0
    genesis._main_branch = True
    genesis.transactions[0]._hash = genesis.transactions[0].hash

    session.add(genesis)
    session.add(Header(genesis._hash))
251
aucoin/cli.py
Normal file
251
aucoin/cli.py
Normal file
|
@ -0,0 +1,251 @@
|
|||
import cmd
|
||||
import inspect
|
||||
import logging.config
|
||||
|
||||
import click
|
||||
from tabulate import tabulate
|
||||
|
||||
from aucoin import util
|
||||
from aucoin.database import session_scope
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Cli(cmd.Cmd):
    """
    Interactive command-line interface to a running aucoin Core instance.

    Each do_* method implements one shell command; docstrings double as the
    text shown by the built-in `help` command.
    """

    def __init__(self, core):
        super().__init__()
        self.core = core

        self.intro = "Type help or ? to list commands. Ctrl+C to exit."
        self.prompt = "aucoin> "

    def do_status(self, arg):
        """
        status

        Print node status.
        """
        # Take the core lock so the numbers printed below form a consistent snapshot.
        with session_scope() as session, self.core.lock:
            core = self.core
            header = core.blockchain.header(session)
            my_ip = core.network.factory.my_ip
            balance = core.wallet.balance

            print(inspect.cleandoc(
                f"""
                Blockchain:
                    Header hash: {header.hash.hex()}
                    Header height: {header.height}
                    Number of blocks:
                        In main branch: {core.blockchain.number_of_blocks(session, main_branch_only=True)}
                        In total: {core.blockchain.number_of_blocks(session)}
                    Average block timespan:
                        Last 100 blocks: {core.blockchain.average_block_timespan(session, n=100):.2f} seconds
                        Since genesis: {core.blockchain.average_block_timespan(session):.2f} seconds
                    Current difficulty: {header.difficulty}
                    Orphans: {len(core.blockchain.orphans)}
                    Size: {util.humanize(core.blockchain.size)}

                Mempool:
                    Transactions: {len(core.mempool)}
                    Orphans: {len(core.mempool.orphans)}
                    Size: {util.humanize(core.mempool.size)}

                Miner:
                    Performance: {util.humanize(core.miner.performance, prefix="dec", suffix="hashes/s") if core.miner.performance else "Stopped"}
                    Workers: {len(core.miner.workers)}

                Network:
                    Node address: {my_ip.ip} ({my_ip.confidence:.0%} confidence)
                    Connected peers: {len(core.network.factory.peerlist)} (max {core.network.max_peers})
                    Peers: {core.network.factory.peerlist}

                Wallet:
                    Balance: {balance.confirmed} auc ({balance.unconfirmed:+} unconfirmed)
                    Addresses: {len(core.wallet.addresses)}
                """
            ))

    def do_history(self, count):
        """
        history [count=20]

        Print history of incoming/outgoing transactions.
        By default the last 20 transactions are shown, specify [count] to overwrite.
        """
        try:
            count = int(count)
        except ValueError:
            # No/invalid argument: fall back to the documented default.
            count = 20

        print(tabulate(self.core.wallet.history(limit=count), headers="keys", tablefmt="orgtbl", showindex=False))

    def do_balance(self, address):
        """
        balance <address>

        Get confirmed balance of provided address.
        """
        try:
            address = bytes.fromhex(address)
            if not address:
                # Empty input parses to b"" — treat it as invalid too.
                raise ValueError
        except ValueError:
            print("Invalid address")
            return

        with session_scope() as session:
            balance = sum(output.value for output in self.core.blockchain.utxos_of_addresses([address], session))

        print(f"{balance} auc")

    def do_send(self, arg):
        """
        send <receiver address> <amount> [fee=0]

        Construct and broadcast, to the network, a transaction sending <amount> of money to <receiver address>.
        By default the transaction fee is 0, specify [fee] to overwrite.
        """
        # Bug fix: the previous double-unpack re-raised an unhandled ValueError on any
        # token count other than 2 or 3, crashing the command loop. Count tokens instead.
        parts = arg.split()
        if len(parts) == 3:
            receiver_address, amount, fee = parts
        elif len(parts) == 2:
            receiver_address, amount = parts
            fee = 0
        else:
            return print("Usage: send <receiver address> <amount> [fee=0]")

        # Convert and check address, amount, and fee
        try:
            receiver_address = bytes.fromhex(receiver_address)
            amount = int(amount)
            fee = int(fee)

            if not amount > 0:
                raise ValueError("Invalid amount")

            if not fee >= 0:
                raise ValueError("Invalid fee")

        except ValueError as e:
            return print(e)

        self.core.wallet.make_transaction(receiver_address, amount, fee)

    def do_receive(self, arg):
        """
        receive

        Generate a new wallet address to receive coins.
        """
        print(self.core.wallet.new_address().hex())

    def do_connect(self, address):
        """
        connect <address>

        Connect to node at <address>.
        """
        self.core.network.connect(address)

    def do_encrypt(self, arg):
        """
        encrypt

        Enable wallet encryption. Input empty password to remove encryption.
        """
        self.core.wallet.encrypt()

    def do_start_miner(self, workers):
        """
        start_miner [workers=(number of available CPUs - 1)]

        Start the miner, optionally with [workers] number of mining workers.
        """
        if not self.core.miner.stopped:
            return print("Miner is already running.")

        try:
            self.core.miner.start(int(workers))
        except ValueError:
            # No/invalid worker count given: use the miner's own default.
            self.core.miner.start()

    def do_stop_miner(self, arg):
        """
        stop_miner

        Stop the miner.
        """
        self.core.miner.stop()

    def do_block(self, block_hash):
        """
        block <hash>

        Print information about a block.
        """
        try:
            block_hash = bytes.fromhex(block_hash)
        except ValueError:
            return print("Invalid block hash")

        with session_scope() as session:
            print(self.core.blockchain.block(block_hash, session))

    def do_transaction(self, transaction_hash):
        """
        transaction <hash>

        Print information about a transaction.
        """
        try:
            transaction_hash = bytes.fromhex(transaction_hash)
        except ValueError:
            return print("Invalid transaction hash")

        with session_scope() as session:
            # Fall back to the mempool for transactions not yet in a block.
            print(self.core.blockchain.transaction(transaction_hash, session) or self.core.mempool.transaction(transaction_hash))

    def do_prune(self, arg):
        """
        prune

        Delete all side-branch blocks with a height 1000 or more from the current header.
        """
        with session_scope() as session:
            blocks_removed = self.core.blockchain.prune(session)
            print(f"{blocks_removed} blocks removed")

    def do_clear(self, arg):
        """
        clear

        Clear the terminal screen
        """
        click.clear()

    def do_exit(self, arg):
        print("Use Ctrl+C to exit.")

    def do_EOF(self, arg):
        # Treat Ctrl+D the same as the explicit exit command.
        return self.do_exit(arg)

    def emptyline(self):
        """
        Method called when an empty line is entered in response to the prompt.
        If this method is not overridden, it repeats the last nonempty command.
        """
        pass

    def postloop(self):
        """
        Hook method executed once when the cmdloop() method is about to return.
        Do cleanup, core.stop(), etc here.
        """
        print("Exiting..")
||||
def main(core):
    """Run the interactive CLI for the given core, treating Ctrl+C as a clean exit."""
    interface = Cli(core)
    try:
        interface.cmdloop()
    except KeyboardInterrupt:
        # cmdloop() never ran its own postloop hook when interrupted, so call it here.
        interface.postloop()
41
aucoin/config.py
Normal file
41
aucoin/config.py
Normal file
|
@ -0,0 +1,41 @@
|
|||
from pathlib import Path
|
||||
|
||||
data_dir = Path.home().joinpath(".aucoin")
|
||||
|
||||
|
||||
def logging(console_level="WARNING"):
    """
    Build the dictConfig-style logging configuration for the application.

    NOTE(review): the function name shadows the stdlib ``logging`` module; it is part
    of the public config API so it is kept, but avoid ``import logging`` in this module.

    :param console_level: Log level for the console handler (file handler is always DEBUG).
    :return: Dict suitable for logging.config.dictConfig().
    """
    return {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format": "%(asctime)s [%(levelname)-7s] %(name)s:%(funcName)s - %(message)s"
            }
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
                "formatter": "standard",
                "level": console_level
            },
            "file": {
                # Rotating file log: 10 files of up to 10 MiB each.
                "class": "logging.handlers.RotatingFileHandler",
                "maxBytes": 10485760,  # 10 MiB
                "backupCount": 10,
                "filename": data_dir.joinpath("logs/main.log"),
                "encoding": "utf-8",
                "formatter": "standard",
                "level": "DEBUG"
            }
        },
        "loggers": {
            "aucoin": {
                "level": "DEBUG"
            }
        },
        "root": {
            "handlers": ["console", "file"],
            "level": "WARNING"
        }
    }
97
aucoin/consensus.py
Normal file
97
aucoin/consensus.py
Normal file
|
@ -0,0 +1,97 @@
|
|||
import json
|
||||
import logging
|
||||
import math
|
||||
from datetime import timedelta
|
||||
|
||||
# Consensus parameters shared by validation, mining and difficulty adjustment.
block_reward = 100  # auc
block_max_size = 102_400  # 100 kibibytes in bytes
block_median_timestamp_nblocks = 11  # how many blocks to look behind when determining if a block's timestamp is greater than the median
block_I_history_nblocks = 300  # window of per-block errors kept for the integral (I) term of the difficulty PID
block_max_future_time = timedelta(minutes=10)  # how far in the future a block's timestamp is allowed to be
block_time = timedelta(minutes=1)  # desired amount of time between blocks

tx_coinbase_max_size = 100  # in bytes
tx_min_fee = 0

logger = logging.getLogger(__name__)
def required_target(block, blockchain, session, k_p=4, k_d=3, k_i=0.001) -> bytes:
    """
    Calculate required target for block based on the previous blocks in the blockchain. Target is based on the mining
    time for the prior blocks to ensure future average mining time remains ~block_time.

    The adjustment is a PID controller on the per-block timing error.

    Based on:
    https://en.bitcoin.it/wiki/Protocol_rules#Difficulty_change
    https://bitcoin.stackexchange.com/questions/855/what-keeps-the-average-block-time-at-10-minutes/857#857

    :param block: The block to calculate target for.
    :param blockchain: The blockchain to use as context.
    :param session: Database session.
    :param k_p: Proportional gain of the PID controller (weight of the latest timing error).
    :param k_d: Derivative gain (weight of the rate of change of the error).
    :param k_i: Integral gain (weight of the accumulated error history).
    :return: Required target as bytes.
    """
    # Required target is genesis block's target for the first three blocks because we need prev_prev_prev_block
    if block.height <= 3:
        return blockchain.genesis_block(session).target

    # Calculate the error and timespan of the previous block
    prev_block = blockchain.block(block.hash_prev_block, session)
    error, timespan = calculate_error_timespan(prev_block, blockchain, session)

    # Maintain a bounded, oldest-first history of errors for the integral term.
    # (Replaces the original reverse/pop/reverse dance: dropping from the front
    # removes the oldest entries just the same.)
    errors = json.loads(prev_block.errors)
    errors.append(error)
    while len(errors) >= block_I_history_nblocks:
        errors.pop(0)
    block.errors = json.dumps(errors)

    # Calculate the error and timespan of the previous previous block
    prev_prev_block = blockchain.block(prev_block.hash_prev_block, session)
    last_error, last_timespan = calculate_error_timespan(prev_prev_block, blockchain, session)

    # Calculate the derivative; the slope of the change in time
    try:
        derivative = (error - last_error) / timespan
    except ZeroDivisionError:
        derivative = 0

    integral = sum(errors)

    # Combine the three terms with gains k_p, k_d and k_i
    pid = (k_p * error + k_d * derivative + k_i * integral)

    #logger.debug("PID: %s", pid)
    #logger.debug("P: %s", error)
    #logger.debug("I: %s", integral)
    #logger.debug("D: %s", derivative)

    # Apply the ratio of change to the previous block's target to get the new target
    prev_target = int.from_bytes(prev_block.target, "big")
    new_target = int(prev_target * (1 - pid/10000))

    return new_target.to_bytes(32, "big")
||||
def calculate_error_timespan(block, blockchain, session):
    """
    Calculates the error of a block. The error is a measure of how much the difficulty of the prev_block
    fluctuates from the actual block_time.

    :param block: The block to find error of.
    :param blockchain: The blockchain used for calculations.
    :param session: Database session.
    :return: error, timespan — error is positive when the block was mined faster than
             block_time (including a timespan of 0) and negative when it was slower.
    """
    prev_block = blockchain.block(block.hash_prev_block, session)

    # Seconds the block actually took relative to its predecessor.
    timespan = block.timestamp - prev_block.timestamp

    error = block_time.seconds - timespan

    return error, timespan
94
aucoin/core.py
Normal file
94
aucoin/core.py
Normal file
|
@ -0,0 +1,94 @@
|
|||
import logging.config
|
||||
import time
|
||||
from datetime import datetime
|
||||
from threading import RLock
|
||||
|
||||
import click
|
||||
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.miner import Miner
|
||||
from aucoin.network import Network
|
||||
from aucoin.statistic import StatisticsLogger
|
||||
from aucoin.validation import Validator
|
||||
from aucoin.wallet import Wallet
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Core(object):
    """
    Composition root: constructs all node components (blockchain, mempool, network,
    validator, wallet, miner), wires their callbacks together, then starts the network
    and optionally the miner.
    """

    def __init__(self, max_peers, interface, manual_seeds, miners, no_catch_up, statistics):
        # The Core lock is used by components when they *really* need consistency
        self.lock = RLock()

        # Initialize main components
        self.blockchain = Blockchain()
        self.mempool = Mempool()

        self.network = Network(self.blockchain, self.mempool, max_peers=max_peers, interface=interface, manual_seeds=manual_seeds)
        self.validator = Validator(self, self.blockchain, self.mempool, self.network)

        self.wallet = Wallet(self.blockchain, self.mempool)

        self.miner = Miner(self, self.blockchain, self.mempool, self.wallet, self.validator.add_block)

        # Set up callbacks: validator broadcasts accepted items to the network...
        self.validator.subscribe_new_block(self.network.new_block)
        self.validator.subscribe_new_transaction(self.network.new_transaction)

        # ...the network feeds received items back into validation...
        self.network.subscribe_new_block(self.validator.add_block)
        self.network.subscribe_new_transaction(self.validator.add_transaction)

        # ...locally created wallet transactions are validated too...
        self.wallet.subscribe_new_transaction(self.validator.add_transaction)

        # ...and the miner restarts its work on new headers/transactions.
        self.validator.subscribe_new_header_block(self.miner.new_block)
        self.validator.subscribe_new_transaction(self.miner.new_transaction)

        # Add statistics logger if specified
        if statistics:
            sl = StatisticsLogger(self, self.blockchain, self.mempool, self.miner, self.network, self.wallet)
            self.validator.subscribe_new_header_block(sl.new_header_block)

        # Start network after callbacks have been set up
        self.network.start()

        # Show a progressbar of catching-up progress before starting the miner, since it doesn't make sense to begin
        # mining until the client has fully caught up to the rest of the network.
        if not no_catch_up:
            logger.info("Catching up to the rest of the network..")
            self.progressbar()
            logger.info("Done catching up!")

        if miners:
            self.miner.start(workers=miners)

    def progressbar(self):
        """
        Block until the blockchain reports it is caught up, rendering a click progress
        bar that is refreshed roughly once per second.
        """
        # Shared with the inner info() closure so the bar's suffix stays current.
        blocks_per_second = 0
        header_timestamp = None

        def info(progress):
            # Suffix text for the progress bar; `progress` is supplied by click but unused here.
            if header_timestamp is None:
                return "Connecting to the network.."
            return f"{blocks_per_second} blocks/s - Caught up to {header_timestamp} UTC"

        with click.progressbar(length=1, label="Catching up", width=0, item_show_func=info) as bar:
            prev_progress = 0
            prev_height = 0

            while prev_progress < 1:
                with session_scope() as session:
                    progress = self.blockchain.catching_up_progress(session)
                    header = self.blockchain.header(session)

                    # Height delta over the ~1 s loop period approximates blocks/s.
                    blocks_per_second = header.height - prev_height
                    header_timestamp = datetime.fromtimestamp(header.timestamp)
                    prev_height = header.height

                # update() takes the change to the progress, but can't handle 0
                delta = progress - prev_progress
                if delta > 0:
                    bar.update(delta)

                prev_progress = progress
                time.sleep(1)
37
aucoin/database.py
Normal file
37
aucoin/database.py
Normal file
|
@ -0,0 +1,37 @@
|
|||
import logging
|
||||
from contextlib import contextmanager
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext import baked
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
from aucoin.config import data_dir
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# SQLAlchemy
# Module-level engine/session plumbing shared by the whole application.
path = data_dir.joinpath("blockchain.db")
engine = create_engine("sqlite:///" + str(path))
Session = sessionmaker(bind=engine, autoflush=True)
DBBase = declarative_base()
# Shared cache for baked (pre-compiled) queries used throughout the project.
bakery = baked.bakery(size=1000)
@contextmanager
def session_scope():
    """
    Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any exception (which is re-raised), and
    always closes the session.

    Usage:
        with session_scope() as session:
            blockchain.do_something(arg1, arg2, session)
    """
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
53
aucoin/dsa.py
Normal file
53
aucoin/dsa.py
Normal file
|
@ -0,0 +1,53 @@
|
|||
from collections import namedtuple
|
||||
|
||||
from cryptography.exceptions import InvalidSignature
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import hashes
|
||||
from cryptography.hazmat.primitives.asymmetric import ec
|
||||
|
||||
|
||||
# ECDSA over secp256k1 with SHA-256, i.e. the same scheme Bitcoin uses.
SIGNATURE_ALGORITHM = ec.ECDSA(hashes.SHA256())
CURVE = ec.SECP256K1  # https://en.bitcoin.it/wiki/Secp256k1
# Simple container pairing a private key with its corresponding public key.
Keypair = namedtuple("Keypair", ("private", "public"))
def sign(private_key: ec.EllipticCurvePrivateKey, data: bytes) -> bytes:
    """
    Produce a signature on the data using the private-key.

    :param private_key: Private key used to sign.
    :param data: Bytes to sign.
    :return: Signature.
    """
    # Delegate directly to the key object; the module-level algorithm fixes ECDSA/SHA-256.
    return private_key.sign(data, SIGNATURE_ALGORITHM)
|
||||
def verify(public_key: ec.EllipticCurvePublicKey, data: bytes, signature: bytes) -> bool:
    """
    Verify authenticity of data using public-key and signature.

    :param public_key: Signer's public-key.
    :param data: Bytes to verify.
    :param signature: Signature as bytes.
    :return: True if signature is valid, False otherwise.
    """
    # The cryptography library signals failure by raising; translate that into a boolean
    # so callers can branch instead of catching exceptions.
    try:
        public_key.verify(signature, data, SIGNATURE_ALGORITHM)
        return True
    except InvalidSignature:
        return False
||||
def generate_keypair() -> Keypair:
    """
    Generate elliptic curve keypair.

    :return: Keypair.
    """
    key = ec.generate_private_key(CURVE, default_backend())
    # The public half is derived from the freshly generated private key.
    return Keypair(key, key.public_key())
28
aucoin/exceptions.py
Normal file
28
aucoin/exceptions.py
Normal file
|
@ -0,0 +1,28 @@
|
|||
class InvalidException(Exception):
    """Base class for validation failures of received blocks and transactions."""
    pass
|
||||
class InvalidBlockException(InvalidException):
    """Raised when a block fails validation."""
    pass
|
||||
class InvalidTransactionException(InvalidException):
    """Raised when a transaction fails validation."""
|
||||
|
||||
|
||||
class OrphanException(Exception):
    """Raised when a received block/transaction references a parent we do not have."""

    def __init__(self, *args, missing=None, **kwargs):
        """
        Missing is the referenced block/transaction hash we don't have. The network should query the peer we received
        the block/transaction from for the missing information.
        """
        super().__init__(*args, **kwargs)
        self.missing = missing
|
||||
|
||||
|
||||
class OrphanBlockException(OrphanException):
    """Orphan case for blocks: the referenced previous block is unknown to us."""
|
||||
|
||||
|
||||
class OrphanTransactionException(OrphanException):
    """Orphan case for transactions: a referenced input transaction is unknown to us."""
|
61
aucoin/main.py
Normal file
61
aucoin/main.py
Normal file
|
@ -0,0 +1,61 @@
|
|||
import logging.config
|
||||
import shutil
|
||||
import urllib.request
|
||||
|
||||
import click
|
||||
|
||||
from aucoin import cli
|
||||
from aucoin import config, __version__, __author__
|
||||
from aucoin import util
|
||||
from aucoin.core import Core
|
||||
from aucoin.miner import Miner
|
||||
from aucoin.network import Network
|
||||
|
||||
|
||||
def validate_address(ctx, param, value):
    """
    Click option callback: parse a hex-encoded address into bytes.

    :param ctx: Click context (unused).
    :param param: Click parameter (unused).
    :param value: Hex string or None.
    :return: Address bytes, or None when no value was supplied.
    :raises click.BadParameter: If the value is not valid hex.
    """
    if value is None:
        return None
    try:
        return bytes.fromhex(value)
    except ValueError:
        raise click.BadParameter("Invalid address")
|
||||
|
||||
|
||||
@click.command()
@click.option("-m", "--miners", default=util.get_default_args(Miner.start)["workers"], show_default=True, help="Number of mining processors.")
@click.option("-p", "--max-peers", default=util.get_default_args(Network)["max_peers"], show_default=True, help="Maximum number of network peers.")
@click.option("-i", "--interface", default=util.get_default_args(Network)["interface"], show_default=True, help="Network interface to bind to.")
@click.option("-s", "--seed", multiple=True, help="Nodes to connect to. Overrides DNS seeds and saved peer database. Can be specified multiple times.")
@click.option("-v", "--verbose", count=True, help="Increase verbosity. Can be used multiple times.")
@click.option("--no-catch-up", is_flag=True, help="Skip catching up to the rest of the network before starting miner and CLI.")
@click.option("--fast-unsafe-catch-up", is_flag=True, help="Catch up much faster by downloading the blockchain database from central server (aucoin.network).")
@click.option("--statistics", is_flag=True, help="Log statistics to .aucoin/statistics/stats.json.")
@click.option("--clean", is_flag=True, help="Remove data directory (blockchain, wallet, etc).")
def main(miners, max_peers, interface, seed, verbose, no_catch_up, fast_unsafe_catch_up, statistics, clean):
    """
    Aucoin node entry point: optionally wipe local data, configure logging, optionally pre-seed the
    blockchain database from a central server, then start the Core and hand control to the CLI.
    """
    print(f"Aucoin v{__version__}")
    print("(c)", __author__)

    # Destructive: removes the whole data directory (blockchain, wallet, etc.) before starting fresh.
    if clean:
        print(f"WARNING: Removing {config.data_dir}")
        shutil.rmtree(config.data_dir, ignore_errors=True)

    # Configure logging
    util.make_data_dirs("logs/")
    # -v / -vv map to INFO / DEBUG; more than two -v flags are capped at DEBUG.
    console_level = ["WARNING", "INFO", "DEBUG"][min(verbose, 2)]
    logging.config.dictConfig(config.logging(console_level=console_level))
    logger = logging.getLogger(__name__)
    print("Console logging level is", console_level)

    # Download blockchain.db from aucoin.network if fast_unsafe_catch_up is specified
    if fast_unsafe_catch_up:
        logger.warning("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        logger.warning("@      WARNING: USING BLOCKCHAIN DATABASE FROM CENTRAL SERVER!   @")
        logger.warning("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        # Streams the remote database straight into the local file; trusts the server entirely (hence "unsafe").
        with urllib.request.urlopen("https://aucoin.network/blockchain.db") as db, \
                open(config.data_dir.joinpath("blockchain.db"), "wb") as file:
            shutil.copyfileobj(db, file)

    # Set up core
    core = Core(max_peers, interface, seed, miners, no_catch_up, statistics)

    logger.info("Starting cli")
    cli.main(core)
|
123
aucoin/mempool.py
Normal file
123
aucoin/mempool.py
Normal file
|
@ -0,0 +1,123 @@
|
|||
import logging
|
||||
from typing import Optional, Set, Tuple
|
||||
|
||||
from aucoin.transactions import Transaction, Output
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Mempool(dict):
    """
    The mempool maintains two collections of transactions:
    - The transaction pool: an unordered collection of transactions that are not in blocks in the main branch, but for
      which we have input transactions, i.e. a set of (valid) transactions yet to be put in a block.
    - Orphan transactions: transactions that can't go into the transaction pool due to one or more missing "parent"
      input transactions.

    NOTE: The validator keeps the transaction pool consistent such that no two transactions conflicts with each other.

    Inspired by:
        https://en.bitcoin.it/wiki/Protocol_rules#Data_structures
        https://bitcoin.stackexchange.com/questions/59174
    """
    def __init__(self):
        super().__init__()
        # Orphans are kept separate from the pool itself (the dict part of self).
        self.orphans = {}

    @property
    def size(self) -> int:
        """
        :return: Size of the transaction pool in bytes.
        """
        return sum(transaction.size for transaction in self.values())

    @property
    def spent(self) -> Set[Tuple[bytes, int]]:
        """
        Find those outputs from transactions in the pool that are spent by another transaction in the pool.

        :return: Set of spent (transaction hash, output index)-pairs.
        """
        return {(txin.prev_tx_hash, txin.txout_index)
                for transaction in self.values()
                for txin in transaction.inputs}

    def transaction(self, transaction_hash) -> Optional["Transaction"]:
        """
        Retrieve a transaction from the pool based on its hash.

        :param transaction_hash: The hash of the transaction to retrieve from the pool.
        :return: Transaction matching the given transaction_hash if it exists in the pool. None otherwise.
        """
        return self.get(transaction_hash)

    def txo(self, transaction_hash, index) -> Optional["Output"]:
        """
        Retrieve a transaction output from the pool based on its containing transaction's hash and its index.

        :param transaction_hash: The hash of the transaction containing the desired output.
        :param index: The index of the output in the transaction.
        :return: Output if it exists. None otherwise.
        """
        try:
            return self.transaction(transaction_hash).outputs[index]
        except (AttributeError, KeyError, IndexError):
            # AttributeError: transaction not in pool (self.transaction() returned None).
            # KeyError/IndexError: no output at that index. IndexError was previously not caught,
            # so an out-of-range index on a list of outputs escaped to the caller.
            return None

    def utxo(self, transaction_hash, index) -> Optional["Output"]:
        """
        Retrieve an unspent transaction output from the pool based on its containing transaction's hash and its index.

        :param transaction_hash: The hash of the transaction containing the desired output.
        :param index: The index of the output in the transaction.
        :return: Output if it exists and is unspent. None otherwise.
        """
        if (transaction_hash, index) in self.spent:
            return None
        return self.txo(transaction_hash, index)

    def _outputs_of_addresses(self, addresses, spent=None) -> Set["Output"]:
        """
        Shared implementation behind txos_of_addresses()/utxos_of_addresses().

        :param addresses: Collection of addresses.
        :param spent: Optional set of spent (tx hash, index)-pairs; matching outputs are excluded.
        :return: Potentially empty set of transaction outputs.
        """
        result = set()
        for transaction in self.values():
            for index, output in enumerate(transaction.outputs):
                if output.address in addresses and (spent is None or (transaction.hash, index) not in spent):
                    # Set convenient attributes to be compatible with the method of the same name from the Blockchain.
                    output.transaction = transaction
                    output._index = index
                    result.add(output)
        return result

    def utxos_of_addresses(self, addresses) -> Set["Output"]:
        """
        Retrieve list of unspent transaction outputs which were sent to any of the given addresses.

        :param addresses: Collection of addresses.
        :return: Potentially empty set of unspent transaction outputs.
        """
        # Compute spent once up front so it isn't recalculated per output.
        return self._outputs_of_addresses(addresses, spent=self.spent)

    def txos_of_addresses(self, addresses) -> Set["Output"]:
        """
        Retrieve list of transaction outputs which were sent to any of the given addresses.

        :param addresses: Collection of addresses.
        :return: Potentially empty set of transaction outputs.
        """
        return self._outputs_of_addresses(addresses)

    def conflicts(self, transaction) -> bool:
        """
        Returns whether the given transaction conflicts with any other transaction in the mempool, i.e. for each input
        in transaction, if the referenced output is spent by (has same input as) another transaction in the mempool.

        :param transaction: The transaction to check.
        :return: True if transaction conflicts. False otherwise.
        """
        referenced_outputs = {(txin.prev_tx_hash, txin.txout_index) for txin in transaction.inputs}
        spent = self.spent  # calculate once for optimisation
        return any(output in spent for output in referenced_outputs)
|
332
aucoin/miner.py
Normal file
332
aucoin/miner.py
Normal file
|
@ -0,0 +1,332 @@
|
|||
import logging
|
||||
import multiprocessing
|
||||
import secrets
|
||||
import time
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from multiprocessing import Process, Pipe, Queue, Value
|
||||
from queue import Empty
|
||||
from threading import Thread
|
||||
from typing import Set
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import serialization
|
||||
|
||||
from aucoin import consensus, dsa
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.exceptions import InvalidException
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.transactions import CoinbaseTransaction, Transaction
|
||||
from aucoin.wallet import Wallet, public_bytes
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Miner(object):
    def __init__(self, core, blockchain: "Blockchain", mempool: "Mempool", wallet: "Wallet", found_callback):
        """
        :param core: The core object is needed to access its lock.
        :param blockchain: The blockchain we're mining on.
        :param mempool: The pool of valid transactions yet to be included in the blockchain.
        :param wallet: The wallet to whom the coinbase should be sent to.
        :param found_callback: Function that should be called when a new block is found.
        """
        self.core = core
        self.blockchain = blockchain
        self.mempool = mempool
        self.wallet = wallet
        self.found_callback = found_callback

        self.workers = []  # list of (process, send-pipe, iterations counter) triples
        self.found_queue = None
        self.performance = None  # hashes/s over the last second, maintained by _performance_reporter
        self.stopped = True
        self.timestamp = None  # When we started working on block. new_block updates this while new_transaction doesn't

        # Information related to the coinbase
        self.address = None
        self.keypair = None
        self._update_address()

        self.found_queue_watcher_thread = None
        self.performance_reporter_thread = None

    def start(self, workers=max(1, multiprocessing.cpu_count() - 1)):
        """
        Start the mining process.

        :param workers: Number of mining workers to start. Defaults to all cores but one, with a minimum of
                        one (the previous cpu_count()-1 default was 0 on single-core machines, which started
                        no workers at all).
        """
        logger.info("Starting miner")

        # Set up concurrent communication
        self.workers = []
        self.found_queue = Queue()
        self.stopped = False

        # Set up threads
        self.found_queue_watcher_thread = Thread(target=self._found_queue_watcher, daemon=True)
        self.performance_reporter_thread = Thread(target=self._performance_reporter, daemon=True)
        self.found_queue_watcher_thread.start()
        self.performance_reporter_thread.start()

        # Create worker processors
        logger.debug("Creating %s workers", workers)
        for w in range(workers):
            pipe_rec, pipe_send = Pipe(duplex=False)  # (receive only, send only)
            iterations = Value("L", 0, lock=False)  # "L" only guarantees 32 bits but on 64 bit systems it should be 64
            process = Process(target=mine, args=(w, pipe_rec, self.found_queue, iterations))
            self.workers.append((process, pipe_send, iterations))

        # Send initial block to workers
        self.new_block()

        # Start workers
        logger.info("Starting %s workers", workers)
        for process, _, _ in self.workers:
            process.start()

    def stop(self):
        """
        Stop the mining process.
        """
        logger.info("Stopping miner")

        # Ask threads to stop
        self.stopped = True
        self.found_queue_watcher_thread.join()
        self.performance_reporter_thread.join()

        # Terminate workers
        for process, _, _ in self.workers:
            process.terminate()
        self.workers = []

    def new_block(self, block=None):
        """
        This function should be called whenever the blockchain updates its header so that we can begin mining on it.

        :param block: The new header block; not used but provided when this function is subscribed as a callback.
        """
        if self.stopped:
            return

        logger.debug("Blockchain changed!")
        self.timestamp = int(datetime.utcnow().timestamp())
        self._update_block()

    def new_transaction(self, transaction=None):
        """
        This function should be called whenever the mempool gets a new transaction so that we can include it.

        :param transaction: The new transaction; not used but provided when this function is subscribed as a callback.
        """
        if self.stopped:
            return

        logger.debug("Mempool changed!")
        self._update_block()

    def _update_address(self):
        """
        Updates the address to which the coinbase is sent by requesting a freshly-generated address from the wallet.
        Also updates the keypair associated with the address. This function is called by the found_queue_watcher when we
        find a new block, to increase anonymity.
        """
        self.address = self.wallet.new_address()
        self.keypair = self.wallet.keys[self.address]

    def _update_block(self):
        """
        Construct a new block and send it to the worker processors.
        """
        with session_scope() as session, self.core.lock:
            # Calculate how much space we have left for transactions by creating a template block
            template = Block(
                hash_prev_block=bytes(32),
                target=bytes(32),
                signature=bytes(100),  # Signature and public_key doesn't have a predefined length, but they're usually 70-80 bytes.
                public_key=bytes(100),
                transactions=[CoinbaseTransaction(bytes(32), coinbase=bytes(consensus.tx_coinbase_max_size))]
            )
            space_left = consensus.block_max_size - template.size

            # Find the best transactions to include
            transactions, fees = find_best_transactions(self.blockchain, self.mempool, space_left, session)

            prev_block = self.blockchain.header(session)
            public_key = public_bytes(self.keypair.public)

            # Send the private key to the miner because it is a crucial part of the process in Sign to Mine.
            # Pipes only work with serializable objects, so need to serialize here and deserialize in the process.
            # (Serialization is loop-invariant, so it is done once instead of once per worker.)
            serialized_private_key = self.keypair.private.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.PKCS8,
                encryption_algorithm=serialization.NoEncryption()
            )

            # Construct and send updated block to workers. The object is constructed in the loop because of SQLAlchemy.
            logger.debug("Sending updated block and private key to workers")
            for _, pipe, _ in self.workers:
                block = Block(
                    hash_prev_block=prev_block.hash,
                    timestamp=self.timestamp,
                    public_key=public_key,
                    transactions=[
                        CoinbaseTransaction(
                            address=self.address,
                            value=consensus.block_reward + fees,
                            block_height=prev_block.height + 1,
                            coinbase=secrets.token_bytes(8)  # ensure hashes are different across workers
                        )
                    ] + list(transactions)
                )
                block.target = consensus.required_target(block, self.blockchain, session)
                pipe.send((block, serialized_private_key))

    def _found_queue_watcher(self):
        """
        Continuously monitors the found_queue for blocks found by the workers. New blocks are sent to the
        self.found_callback callback function.
        """
        while not self.stopped:
            try:
                block = self.found_queue.get(timeout=1)  # only block for 1s to allow checking self.stopped regularly
            except Empty:
                continue

            logger.debug("Found block with hash: %s", block.hash.hex())
            try:
                self.found_callback(block)

                # Update the address to which the coinbase is sent every time we find a valid block
                self._update_address()

            except InvalidException as e:
                logger.info("Mined invalid block: %s (%s)", block, e)
                logger.info("Restarting miner..")
                self.new_block()

            logger.debug("Done with %s", block.hash.hex())

    def _performance_reporter(self):
        """
        Continuously reports the performance of workers in hashes/s.
        """
        prev_total = 0
        while not self.stopped:
            total = sum(iterations.value for _, _, iterations in self.workers)
            self.performance = total - prev_total

            prev_total = total
            time.sleep(1)

        self.performance = None
|
||||
|
||||
|
||||
def mine(index, pipe: Pipe, found_queue: Queue, iterations: Value):
    """
    Mine a block by continuously signing it until its hash is below or equal to the target.

    Runs as a separate worker process ("Sign to Mine": each signing attempt yields a new candidate hash —
    presumably because ECDSA signatures are randomized; confirm against the dsa module).

    :param index: The index of this mining worker; only used for logging purposes.
    :param pipe: The pipe used to communicate new blocks to the worker whenever the blockchain's header is updated.
    :param found_queue: Where to put found blocks.
    :param iterations: A ctypes object allocated from shared memory; it is shared to allow monitoring performance.
    """
    try:
        logger.debug("Worker %s started - waiting for initial block", index)
        # `receive` forces a blocking recv: initially (no block yet) and again after a block is found.
        receive = True
        while True:
            # Receive updated block through pipe every 16384 iterations (empirically chosen for best performance)
            if receive or (iterations.value % 16384 == 0 and pipe.poll()):
                # While-loop makes sure we get the latest block that was sent through the pipe
                while receive or pipe.poll():
                    block, serialized_private_key = pipe.recv()  # blocking

                    # Deserialize private key
                    private_key = serialization.load_pem_private_key(
                        serialized_private_key,
                        password=None,
                        backend=default_backend()
                    )

                    logger.debug("Received new block and private key through pipe")
                    receive = False

            # Sign the block's header (with the signature field truncated) using the private key matching the address
            # receiving the coinbase.
            block.signature = dsa.sign(private_key, block.truncated_header)

            if block.hash <= block.target:
                logger.debug("Hash below target threshold found!")
                found_queue.put(block)
                # Block on the pipe until the main process sends a fresh block to work on.
                receive = True

            iterations.value += 1

    except KeyboardInterrupt:
        logger.info("Stopped worker %s (KeyboardInterrupt)", index)
|
||||
|
||||
|
||||
def find_best_transactions(blockchain: "Blockchain", mempool: "Mempool", space: int, session) -> "Tuple[Set[Transaction], int]":
    """
    Find the best transactions in memory pool and return them.

    :param blockchain: A blockchain is required to look up the unspent transactions for the input-values.
    :param mempool: Pool from where to collect transactions.
    :param space: How much space (in bytes) is left in the block for transactions.
    :param session: Database session.
    :return: Tuple of the best transactions (fee per byte) and total fee.
    """
    Info = namedtuple("Info", ("size", "fee", "dependencies"))
    infos = {}  # memoization table: transaction -> Info (explicit dict instead of a mutable default argument)

    def get_info(transaction):
        # Build and return the Info-tuple for the given transaction's entire dependency tree.
        # We define this additional function because the problem is an obvious application for dynamic programming.
        try:
            return infos[transaction]
        except KeyError:
            pass

        size = transaction.size
        fee = transaction.fee(blockchain, mempool, session)
        dependencies = set()

        # Add referenced output transactions to list of dependencies if it is also in the mempool.
        # Notice that we don't do anything if this input references an output from the blockchain: in this case it
        # is "free" to include the transaction (there is no dependency tree, so no additional size or fees).
        for txin in transaction.inputs:
            dependency = mempool.transaction(txin.prev_tx_hash)

            # If dependency is None it doesn't exist in mempool (so it must exist in blockchain)
            if dependency is not None:
                # Recursively build dependency tree
                info = get_info(dependency)

                size += info.size
                fee += info.fee
                # Bug fix: the direct parent itself must be part of the dependency set, not only its ancestors —
                # previously the sets stayed empty and a transaction could be selected without its in-pool parent.
                dependencies.add(dependency)
                dependencies.update(info.dependencies)

        infos[transaction] = Info(size, fee, dependencies)
        return infos[transaction]

    # If any of a transaction's inputs reference other transactions from the mempool, these in-pool transactions will
    # have to be included in the blockchain before (or in the same block) as this transaction. For each transaction, we
    # build an "Info" tuple containing the total size/fee for this transaction's entire dependency tree.
    promising = {transaction: get_info(transaction) for transaction in mempool.values()}

    # Build list of best transactions by looping through promising transactions in the order of best to worst
    # (fee per byte of the whole dependency tree).
    best_transactions = set()
    used = 0  # running total of bytes taken by best_transactions (avoids re-summing every iteration)
    for transaction, info in sorted(promising.items(), key=lambda x: x[1].fee / x[1].size, reverse=True):
        # Skip if we don't have space for this transaction and its dependencies
        if space - used < info.size:
            continue

        # Otherwise add transaction and its dependencies to the set of best transactions. Note that some of them
        # might already be included; only newly-added ones count against the remaining space.
        newly_added = ({transaction} | info.dependencies) - best_transactions
        best_transactions.update(newly_added)
        used += sum(t.size for t in newly_added)

    total_fee = sum(transaction.fee(blockchain, mempool, session) for transaction in best_transactions)
    return best_transactions, total_fee
|
653
aucoin/network.py
Normal file
653
aucoin/network.py
Normal file
|
@ -0,0 +1,653 @@
|
|||
import json
|
||||
import logging
|
||||
import random
|
||||
import secrets
|
||||
import socket
|
||||
import time
|
||||
import warnings
|
||||
from collections import Counter, namedtuple
|
||||
from enum import Enum
|
||||
from threading import Thread
|
||||
from typing import Optional, List
|
||||
|
||||
from twisted.internet import threads
|
||||
from twisted.internet.protocol import connectionDone, ReconnectingClientFactory
|
||||
from twisted.protocols.basic import LineOnlyReceiver
|
||||
from twisted.python import log
|
||||
|
||||
from aucoin import __version__
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.config import data_dir
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.exceptions import InvalidException, OrphanException, OrphanBlockException, OrphanTransactionException
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.transactions import Transaction
|
||||
|
||||
with warnings.catch_warnings():
|
||||
# Ignore warning about missing library for TLS hostname verification
|
||||
warnings.simplefilter("ignore")
|
||||
from twisted.internet import reactor
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MsgType(Enum):
    """
    Wire-protocol message types.

    Members are declared with an empty tuple `()`; the custom __new__ below assigns each member an
    auto-incrementing integer value (1, 2, 3, ...) in declaration order, so no explicit numbering is needed.
    """
    def __new__(cls):
        # cls.__members__ holds the members defined so far, so the next value is its length + 1.
        obj = object.__new__(cls)
        obj._value_ = len(cls.__members__) + 1
        return obj

    HELLO = () # The initial greeting message exchanged between peers after connecting

    PEERS = () # List of IP addresses of our peers
    PEERS_OFFER = () # Empty message offering our peer list to counterpart. Broadcast when we get a new peer
    PEERS_REQUEST = () # Empty message requesting counterpart's peer list. Sent in response to PEERS_OFFER

    BLOCK = () # A single block. Only sent in response to BLOCK_REQUEST
    BLOCK_OFFER = () # "I have this block, request it if you need". Broadcast when blockchain gets a new header
    BLOCK_REQUEST = () # Request a single block by hash

    BLOCKS_REQUEST = () # Request a BLOCKS_OFFER by sending the hash of our blockchain's header
    BLOCKS_OFFER = () # "These blocks are ahead of your head, request any you need". Sent in response to BLOCKS_REQUEST

    TRANSACTION = () # A single transaction. Only sent in response to TRANSACTION_REQUEST
    TRANSACTION_OFFER = () # "I have this tx, request it if you need". Broadcast when mempool gets a new transaction
    TRANSACTION_REQUEST = () # Request a single transaction by hash

    BAN = () # A message telling the counterpart we don't want to talk to them for a (un)specified amount of time
|
||||
|
||||
|
||||
class NetworkProtocol(LineOnlyReceiver):
|
||||
MAX_LENGTH = 5_242_880 # 5 MiB
|
||||
|
||||
def __init__(self, factory):
    """
    Each NetworkProtocol represents one connection.
    Call transport.write() or sendLine() to write some data to the other end.

    :param factory: The shared protocol factory; holds the peer list and network-wide state.
    """
    self.factory = factory
    # Becomes True once the counterpart's HELLO arrives; until then every other message type is rejected.
    self.received_hello = False
|
||||
|
||||
@property
def peer_address(self):
    """
    :return: The IP address of the counterpart as seen from our perspective.
    """
    return self.transport.getPeer().host
|
||||
|
||||
def connectionMade(self):
    """
    This may be considered the initializer of the protocol, because it is called when a connection is completed.
    If you need to send any greeting or initial message, do it here.
    """
    logger.debug("Protocol connection to %s made", self.peer_address)

    # Update peerlist (register this connection and persist the list)
    self.factory.peers.append(self)
    save_peers(self.factory.peerlist)

    # Greet the counterpart; the protocol requires HELLO to be the first message in each direction.
    self.send_hello()
|
||||
|
||||
def connectionLost(self, reason=connectionDone):
    """
    Called when the connection is shut down.
    Clear any any external references to this Protocol here. The connection has been closed.

    :param reason: Twisted failure describing why the connection closed.
    """
    logger.info("Protocol connection to %s lost. Reason: %s", self.peer_address, reason.value)

    # Update peerlist (deregister this connection and persist the list)
    self.factory.peers.remove(self)
    save_peers(self.factory.peerlist)

    # Bootstrap network again if we just lost our last peer
    # (max_peers == 0 means networking is effectively disabled, so don't bootstrap then).
    if not self.factory.peers and self.factory.network.max_peers != 0:
        logger.info("Last peer lost")
        self.factory.bootstrap()
|
||||
|
||||
def disconnect(self):
    """
    Disconnect from peer. connectionLost() will make sure to update peerlist etc.
    Note: transport.loseConnection() may not result in the connection closing immediately, e.g. if you have writes
    buffered. To close the connection immediately, discarding any buffered writes, call transport.abortConnection().
    """
    logger.debug("Disconnecting from %s", self.peer_address)
    self.transport.loseConnection()
|
||||
|
||||
def lineReceived(self, line):
    """
    Called for each received line.

    Decodes the JSON message, enforces that the first message is HELLO, and dispatches to the matching
    receive_* handler. Any failure while processing is treated as possibly-malicious input and answered
    with a BAN rather than letting the exception propagate.
    """
    try:
        # Decode bytes to json
        msg = json.loads(line)
        msg_type = MsgType[msg["msg_type"]]

        # First message must be HELLO
        if not self.received_hello and msg_type != MsgType.HELLO:
            return self.send_ban(reason="First message not HELLO")

        # Handlers: one receive_* method per message type
        handlers = {
            MsgType.HELLO: self.receive_hello,
            MsgType.PEERS: self.receive_peers,
            MsgType.PEERS_OFFER: self.receive_peers_offer,
            MsgType.PEERS_REQUEST: self.receive_peers_request,
            MsgType.BLOCK: self.receive_block,
            MsgType.BLOCK_OFFER: self.receive_block_offer,
            MsgType.BLOCK_REQUEST: self.receive_block_request,
            MsgType.BLOCKS_REQUEST: self.receive_blocks_request,
            MsgType.BLOCKS_OFFER: self.receive_blocks_offer,
            MsgType.TRANSACTION: self.receive_transaction,
            MsgType.TRANSACTION_OFFER: self.receive_transaction_offer,
            MsgType.TRANSACTION_REQUEST: self.receive_transaction_request,
            MsgType.BAN: self.receive_ban
        }

        # Call appropriate handler with unpacked payload
        payload = msg["payload"] or {} # "or {}" because some messages don't have a payload, but can't unpack None
        logger.debug("Received %s from %s: %s", msg_type.name, self.peer_address, payload)
        handlers[msg_type](**payload)

    except Exception as e:
        # Too broad exception clause? Maybe, but we don't want malicious peers to crash us.
        logger.exception(e)
        logger.debug("Processing data from %s caused error. Malicious peer?", self.peer_address)
        self.send_ban(reason="Malformed data")
|
||||
|
||||
def send(self, msg_type: MsgType, payload=None):
    """
    Pack msg_type and payload in json and send it over the wire.

    :param msg_type: The message type.
    :param payload: The actual data payload.
    """
    logger.debug("Sending %s to %s: %s", msg_type.name, self.peer_address, payload)
    message = {
        "msg_type": msg_type.name,
        "payload": payload
    }
    self.sendLine(json.dumps(message).encode("utf8"))
|
||||
|
||||
##### HELLO #####
|
||||
|
||||
# HELLO
|
||||
def send_hello(self):
    """Send the initial HELLO greeting: our version, the peer's IP as we see it, and our session nonce."""
    self.send(MsgType.HELLO, {
        "version": __version__,
        "your_ip": self.peer_address,
        "nonce": self.factory.nonce
    })
|
||||
|
||||
def receive_hello(self, version=None, your_ip=None, nonce=None):
    """
    Handle the counterpart's HELLO: decide whether to keep the connection, record our own external IP as the
    peer sees it, advertise our grown peer list, and start syncing blocks from the peer.

    :param version: Counterpart's software version.
    :param your_ip: Our IP address as seen by the counterpart.
    :param nonce: Counterpart's session nonce (used by should_abort, e.g. to detect self-connections — confirm).
    """
    self.received_hello = True

    # Should we disconnect?
    abort_reason = self.factory.should_abort(self.peer_address, nonce, version)
    if abort_reason:
        logger.debug("Disconnecting from %s: %s", self.peer_address, abort_reason)

        # Send our peerlist to counterpart before disconnecting; he might have no one else to talk to :(
        # Note: this is not an offer; disconnect() will disconnect as soon as our send/receive buffer is empty so
        # we don't have time to wait for a response to our offer. We send our peers, they take it or leave it.
        self.send_peers()
        return self.send_ban(reason=abort_reason)

    self.factory.my_ips[self.peer_address] = your_ip

    # Broadcast peers_offer to let everyone know we have a new node in our peerlist. Other nodes will send a
    # PEERS_REQUEST if they need more peers. The newly connected counterpart will also get the offer.
    self.factory.broadcast(NetworkProtocol.send_peers_offer)

    # Request list of block hashes, peer might have, that are ahead of our blockchain's header
    with session_scope() as session:
        header_hash = self.factory.network.blockchain.header(session).hash
    self.send_blocks_request(header_hash.hex())
|
||||
|
||||
##### PEERS #####
|
||||
|
||||
# PEERS
|
||||
def send_peers(self):
    """
    Send the counterpart the list of IP-addresses we are currently connected to.
    """
    self.send(MsgType.PEERS, {"peers": self.factory.peerlist})
|
||||
|
||||
def receive_peers(self, peers=None):
    """
    Handle a PEERS message: schedule connection attempts to each address.

    :param peers: List of IP-addresses received from the counterpart.
    """
    # Guard against a missing or empty peer list in the payload; the original
    # code would raise TypeError on random.shuffle(None).
    if not peers:
        return

    # Iterate a shuffled *copy* (random.sample of the full length) so the
    # caller's payload list is not mutated in place.
    for address in random.sample(peers, len(peers)):
        # Delay connection attempts 0-5 seconds to avoid peers connecting to each other at the *exact* same time
        reactor.callLater(random.uniform(0, 5), self.factory.connect, address)
|
||||
|
||||
# PEERS_OFFER
|
||||
def send_peers_offer(self):
    """
    Announce that our peer list is available; interested peers reply with PEERS_REQUEST.
    """
    self.send(MsgType.PEERS_OFFER)
|
||||
|
||||
def receive_peers_offer(self):
    """
    Handle a PEERS_OFFER: only ask for the peer list while we still have
    room for more connections.
    """
    has_room = len(self.factory.peers) < self.factory.network.max_peers
    if has_room:
        self.send_peers_request()
|
||||
|
||||
# PEERS_REQUEST
|
||||
def send_peers_request(self):
    """
    Ask the counterpart for its peer list.
    """
    self.send(MsgType.PEERS_REQUEST)
|
||||
|
||||
def receive_peers_request(self):
    """
    Handle a PEERS_REQUEST by replying with our current peer list.
    """
    self.send_peers()
|
||||
|
||||
##### BLOCK #####
|
||||
|
||||
# BLOCK
|
||||
def send_block(self, block):
    """
    Send a full block, in its raw dict form, to the counterpart.

    :param block: The Block to send.
    """
    payload = {"block": block.raw}
    self.send(MsgType.BLOCK, payload)
|
||||
|
||||
def receive_block(self, block=None):
    """
    Handle a BLOCK message: deserialize it and hand it to the network layer.

    :param block: Raw dict form of the block.
    """
    received = Block.from_raw(**block)
    self.factory.network.notify_new_block(received, self)
|
||||
|
||||
# BLOCK_OFFER
|
||||
def send_block_offer(self, block):
    """
    Advertise a block to the counterpart by its hex-encoded hash.

    :param block: The Block to advertise.
    """
    payload = {"block_hash": block.hash.hex()}
    self.send(MsgType.BLOCK_OFFER, payload)
|
||||
|
||||
def receive_block_offer(self, block_hash=None):
    """
    Handle a BLOCK_OFFER: request the block unless we already have it
    (in the blockchain or as a known orphan).

    :param block_hash: Hex-encoded hash of the offered block.
    """
    hash_bytes = bytes.fromhex(block_hash)
    with session_scope() as session:
        already_known = (
            self.factory.network.blockchain.block(hash_bytes, session)
            or hash_bytes in self.factory.network.mempool.orphans
        )
        if not already_known:
            self.send_block_request(block_hash)
|
||||
|
||||
# BLOCK_REQUEST
|
||||
def send_block_request(self, block_hash):
    """
    Request the full block with the given hash.

    :param block_hash: Hex-encoded block hash.
    """
    payload = {"block_hash": block_hash}
    self.send(MsgType.BLOCK_REQUEST, payload)
|
||||
|
||||
def receive_block_request(self, block_hash=None):
    """
    Handle a BLOCK_REQUEST: reply with the block if we have it.

    :param block_hash: Hex-encoded hash of the requested block.
    """
    with session_scope() as session:
        requested = self.factory.network.blockchain.block(bytes.fromhex(block_hash), session)
        if requested:
            self.send_block(requested)
|
||||
|
||||
##### BLOCKS #####
|
||||
|
||||
# BLOCKS_REQUEST
|
||||
def send_blocks_request(self, header_hash):
    """
    Ask the counterpart for block hashes ahead of the given header.

    :param header_hash: Hex-encoded hash of our current blockchain header.
    """
    payload = {"header_hash": header_hash}
    self.send(MsgType.BLOCKS_REQUEST, payload)
|
||||
|
||||
def receive_blocks_request(self, header_hash=None):
    """
    Handle a BLOCKS_REQUEST: reply with a BLOCKS_OFFER listing hashes of
    blocks ahead of the peer's header.

    :param header_hash: Hex-encoded hash of the peer's current header.
    """
    # Find a batch of block hashes ahead of peer's header (limit=100 per offer;
    # the peer paginates by re-requesting from the last hash it received)
    with session_scope() as session:
        blocks = self.factory.network.blockchain.blocks_ahead(
            bytes.fromhex(header_hash),
            session,
            limit=100
        )

        # Uses the stored _hash column rather than recomputing block.hash
        self.send_blocks_offer([block._hash.hex() for block in blocks])
|
||||
|
||||
# BLOCKS_OFFER
|
||||
def send_blocks_offer(self, block_hashes):
    """
    Offer a batch of hex-encoded block hashes to the counterpart.

    :param block_hashes: List of hex-encoded block hashes.
    """
    payload = {"block_hashes": block_hashes}
    self.send(MsgType.BLOCKS_OFFER, payload)
|
||||
|
||||
def receive_blocks_offer(self, block_hashes=None):
    """
    Handle a BLOCKS_OFFER listing block hashes ahead of our header.

    :param block_hashes: List of hex-encoded block hashes. Presumably ordered
        oldest-first, since the follow-up request continues from the last
        entry -- TODO confirm ordering guarantee in blocks_ahead().
    """
    # Peer has no blocks ahead our header => peer has the same header as us
    if not block_hashes:
        return

    # Process all blocks like they were individual offers
    for block_hash in block_hashes:
        self.receive_block_offer(block_hash)

    # Since BLOCKS_OFFER has a limit, send a new BLOCKS_REQUEST requesting blocks from the last hash in list
    self.send_blocks_request(block_hashes[-1])
|
||||
|
||||
##### TRANSACTION #####
|
||||
|
||||
# TRANSACTION
|
||||
def send_transaction(self, transaction):
    """
    Send a full transaction, in its raw dict form, to the counterpart.

    :param transaction: The Transaction to send.
    """
    payload = {"transaction": transaction.raw}
    self.send(MsgType.TRANSACTION, payload)
|
||||
|
||||
def receive_transaction(self, transaction=None):
    """
    Handle a TRANSACTION message: deserialize and hand off to the network layer.

    :param transaction: Raw dict form of the transaction.
    """
    # Payload comes from an untrusted peer: a missing/empty "inputs" list would
    # crash the original subscript below, so disregard malformed payloads.
    if not transaction or not transaction.get("inputs"):
        return logger.debug("Disregarding transaction: malformed payload")

    # Disregard if received transaction is CoinbaseTransaction since those don't work as a standalone transaction
    if "coinbase" in transaction["inputs"][0]:
        return logger.debug("Disregarding transaction %s: is coinbase transaction", transaction["hash"])

    self.factory.network.notify_new_transaction(Transaction.from_raw(**transaction), self)
|
||||
|
||||
# TRANSACTION_OFFER
|
||||
def send_transaction_offer(self, transaction):
    """
    Advertise a transaction to the counterpart by its hex-encoded hash.

    :param transaction: The Transaction to advertise.
    """
    payload = {"transaction_hash": transaction.hash.hex()}
    self.send(MsgType.TRANSACTION_OFFER, payload)
|
||||
|
||||
def receive_transaction_offer(self, transaction_hash=None):
    """
    Handle a TRANSACTION_OFFER: request the transaction unless we already
    know it (confirmed, pending in the mempool, or a known orphan).

    :param transaction_hash: Hex-encoded hash of the offered transaction.
    """
    hash_bytes = bytes.fromhex(transaction_hash)
    with session_scope() as session:
        already_known = (
            self.factory.network.blockchain.transaction(hash_bytes, session)
            or self.factory.network.mempool.transaction(hash_bytes)
            or hash_bytes in self.factory.network.mempool.orphans
        )
        if not already_known:
            self.send_transaction_request(transaction_hash)
|
||||
|
||||
# TRANSACTION_REQUEST
|
||||
def send_transaction_request(self, transaction_hash):
    """
    Request the full transaction with the given hash.

    :param transaction_hash: Hex-encoded transaction hash.
    """
    payload = {"transaction_hash": transaction_hash}
    self.send(MsgType.TRANSACTION_REQUEST, payload)
|
||||
|
||||
def receive_transaction_request(self, transaction_hash=None):
    """
    Handle a TRANSACTION_REQUEST: reply with the transaction if we have it,
    either confirmed in the blockchain or pending in the mempool.

    :param transaction_hash: Hex-encoded hash of the requested transaction.
    """
    hash_bytes = bytes.fromhex(transaction_hash)
    with session_scope() as session:
        found = (self.factory.network.blockchain.transaction(hash_bytes, session)
                 or self.factory.network.mempool.transaction(hash_bytes))
        if found:
            self.send_transaction(found)
|
||||
|
||||
##### BAN #####
|
||||
|
||||
# BAN
|
||||
def send_ban(self, seconds=60, reason=None):
    """
    Ban the counterpart: record the ban locally, tell them why, and disconnect.

    :param seconds: Ban duration in seconds.
    :param reason: Human-readable reason for the ban.
    """
    # Record the ban locally first so we refuse reconnects until it expires.
    expiration = time.time() + seconds
    self.factory.banned[self.peer_address] = expiration
    self.send(MsgType.BAN, {"seconds": seconds, "reason": reason})
    self.disconnect()
|
||||
|
||||
def receive_ban(self, seconds=60, reason=None):
    """
    Handle a BAN message: mirror the ban locally so we don't try to
    reconnect to this peer until it expires.

    :param seconds: Ban duration in seconds.
    :param reason: Human-readable reason (informational only).
    """
    expires_at = time.time() + seconds
    self.factory.banned[self.peer_address] = expires_at
|
||||
|
||||
|
||||
class NetworkFactory(ReconnectingClientFactory):
    """
    Twisted factory shared between all connections (protocols).

    Tracks connected peers, local ban bookkeeping, and the external
    IP-addresses other peers report back to us.
    """

    def __init__(self, network):
        """
        :param network: The owning Network instance (used for port, interface, max_peers).
        """
        self.network = network

        self.peers = []     # connected NetworkProtocol instances
        self.my_ips = {}    # {peer-ip: my-ip-address, ...}
        self.banned = {}    # {address: expiration-time, ...}
        self.nonce = secrets.randbits(32)  # used to detect connections to ourselves

    @property
    def peerlist(self):
        """
        :return: List of IP-addresses we are connected to.
        """
        return [peer.transport.getPeer().host for peer in self.peers]

    @property
    def my_ip(self):
        """
        :return: Tuple of the ip we've most commonly been told is ours and the confidence.
        """
        my_ip = namedtuple("my_ip", ["ip", "confidence"])
        try:
            # Majority vote over what peers told us in HELLO messages
            ip, count = Counter(self.my_ips.values()).most_common(1)[0]
            return my_ip(ip, count / len(self.my_ips))
        except IndexError:
            # We haven't connected to anyone yet
            return my_ip(None, 0)

    def connect(self, address):
        """
        Establish a new connection to given address.

        :param address: The IP-address to connect to.
        """
        reactor.connectTCP(address, self.network.port, self, timeout=5, bindAddress=(self.network.interface, 0))

    def bootstrap(self):
        """
        Bootstrap the network by connecting to provided IP-addresses, known peers from file, or seeds from DNS.
        """
        logger.info("Bootstrapping network")
        for address in self.network.manual_seeds or load_peers() or seeds():
            self.connect(address)

    def startedConnecting(self, connector):
        """
        Called when a connection has been started (as a client), both by connect() and retry().
        Call connector.stopConnecting() to stop the connection attempt.
        """
        logger.debug("Client started connecting to %s", connector.host)

        abort_reason = self.should_abort(peer_address=connector.host, client=True)
        if abort_reason:
            logger.debug("Abort connection to %s: %s", connector.host, abort_reason)
            connector.stopConnecting()

        super().startedConnecting(connector)

    def buildProtocol(self, addr):
        """
        Called when a new connection was established (either as client or as server).
        None may be returned to immediately close the new connection.
        This function must call resetDelay().
        """
        logger.info("Connection established to %s", addr.host)
        self.resetDelay()

        return NetworkProtocol(self)

    def clientConnectionLost(self, connector, reason):
        """
        Called when a connection was made and then disconnected.
        Will retry the connection with increasing delay.
        """
        logger.debug("Client connection to %s lost. WILL try to reconnect. Reason: %s", connector.host, reason.value)
        super().clientConnectionLost(connector, reason)

    def clientConnectionFailed(self, connector, reason):
        """
        Called when a connection could not be established.
        Will stop retrying.
        """
        logger.debug("Client connection to %s could not be established. Will NOT try to reconnect. Reason: %s", connector.host, reason.value)
        self.stopTrying()
        super().clientConnectionFailed(connector, reason)

    def should_abort(self, peer_address=None, nonce=None, version=None, client=False) -> Optional[str]:
        """
        Determines if we should disconnect/abort connection to peer.

        :return: None if we shouldn't abort. Otherwise return abort reason as string.
        """
        # Abort if peer is banned (either because they banned us or because we banned them)
        ban_time = self.banned.get(peer_address, 0)
        if ban_time > time.time():
            return f"Banned ({ban_time - time.time():.0f} seconds remaining)"

        # Abort if we have max_peers.
        # As a client, add the yet-to-be-established connection to number of peers to check if it would put us above
        if len(self.peers) + client > self.network.max_peers:
            return "Have max peers"

        # Abort if already connected to peer.
        # Only abort duplicate connections as a client because multiple incoming connections from the same IP is
        # acceptable since clients can share external IP due to NAT
        if client and peer_address in self.peerlist:
            return "Already connected to IP address"

        # Abort if connected to self according to other peers.
        # The client may not be able to connect to itself due to NAT, so it may never get a chance to send the nonce
        if peer_address == self.my_ip.ip:
            return "Connection to own IP address"

        # Abort if connected to self according to nonce
        if nonce == self.nonce:
            return "Connection to ourselves"

        # Abort if peer's protocol version older than ours
        # NOTE(review): this is a string comparison of version strings -- works
        # lexicographically; verify it holds for multi-digit components.
        if version is not None and version < __version__:
            return "Incompatible protocol version"

        return None

    def broadcast(self, func, *args, **kwargs):
        """
        Broadcast to all peers by calling func on each connection.

        :param func: The function to call on each connection (protocol). E.g. NetworkProtocol.send_peers_offer.
        """
        logger.debug("Broadcasting %s", func.__name__)
        for peer in self.peers:
            # Resolve the bound method by name on each connected protocol;
            # getattr() is the idiomatic form of the original
            # peer.__getattribute__(func.__name__) call and behaves identically.
            getattr(peer, func.__name__)(*args, **kwargs)
|
||||
|
||||
|
||||
class Network(object):
    """
    Facade over the Twisted networking stack. Owns the NetworkFactory and is
    the class the core (and others) interface with. The reactor runs in its
    own daemon thread; all calls into Twisted go through reactor.callFromThread.
    """

    def __init__(self, blockchain: Blockchain, mempool: Mempool, max_peers=100, interface="0.0.0.0", port=8334,
                 manual_seeds=None):
        """
        The object which holds the factory and all protocols. This is the class the core (and others) interface with.

        :param blockchain: The blockchain instance from core. Used to look up blocks and transactions.
        :param mempool: The mempool instance from core. Used to look up unconfirmed transactions.
        :param max_peers: The maximum number of connected peers.
        :param interface: The interface the server should listen on. Defaults to all.
        :param port: The port to listen on / use to connect to others. Defaults to Bitcoin's port + 1.
        :param manual_seeds: Collection of IP-addresses to use for bootstrapping. Overrides file/DNS if provided.
        """
        self.blockchain = blockchain
        self.mempool = mempool
        self.max_peers = max_peers
        self.interface = interface
        self.port = port
        self.manual_seeds = manual_seeds or []

        # Callbacks invoked when a new block/transaction arrives from the network
        self._new_block_subscribers = set()
        self._new_transaction_subscribers = set()

        self.factory = NetworkFactory(self)  # the factory is shared between all connections (protocols)

    def start(self):
        """
        Start the network.
        """
        self.factory.bootstrap()

        # Configure and start server (reactor) in new thread
        reactor.listenTCP(interface=self.interface, port=self.port, factory=self.factory)
        Thread(target=reactor.run, kwargs={"installSignalHandlers": False}, daemon=True).start()

    def connect(self, address):
        """
        Establish a new connection to given address.

        :param address: The IP-address to connect to.
        """
        # Must hop onto the reactor thread; Twisted APIs are not thread-safe
        reactor.callFromThread(self.factory.connect, address)

    def new_block(self, block):
        """
        This function should be called whenever the validator validates a new block.

        :param block: The new block.
        """
        # Are we catching up to the rest of the network? If so, we don't need to broadcast every block we receive
        with session_scope() as session:
            if self.blockchain.catching_up_progress(session) < 1:
                return logger.debug("Skipping broadcast: is catching up")

        self._broadcast(NetworkProtocol.send_block_offer, block)

    def new_transaction(self, transaction):
        """
        This function should be called whenever the validator validates a new transaction.
        :param transaction: The new transaction.
        """
        self._broadcast(NetworkProtocol.send_transaction_offer, transaction)

    def _broadcast(self, func, *args, **kwargs):
        # Thread-safe wrapper: marshals the broadcast onto the reactor thread
        reactor.callFromThread(self.factory.broadcast, func, *args, **kwargs)

    def request_block(self, block_hash: bytes):
        """
        Broadcast block request to the network.

        :param block_hash: The hash of the block to request.
        """
        self._broadcast(NetworkProtocol.send_block_request, block_hash.hex())

    def request_transaction(self, transaction_hash: bytes):
        """
        Broadcast transaction request to the network.

        :param transaction_hash: The hash of the transaction to request.
        """
        self._broadcast(NetworkProtocol.send_transaction_request, transaction_hash.hex())

    # Subscribers and events

    def subscribe_new_block(self, callback):
        # Register a callable invoked with each block received from the network
        self._new_block_subscribers.add(callback)

    def notify_new_block(self, block, protocol):
        # Called from protocol receive_block(); fans out to all subscribers
        for subscriber in self._new_block_subscribers:
            self._notify(subscriber, block, protocol)

    def subscribe_new_transaction(self, callback):
        # Register a callable invoked with each transaction from the network
        self._new_transaction_subscribers.add(callback)

    def notify_new_transaction(self, transaction, protocol):
        # Called from protocol receive_transaction(); fans out to all subscribers
        for subscriber in self._new_transaction_subscribers:
            self._notify(subscriber, transaction, protocol)

    def _notify(self, subscriber, block_or_tx, protocol):
        # Run the subscriber off the reactor thread; the errback chain sorts
        # failures into orphan -> invalid -> fatal, in that order
        d = threads.deferToThread(subscriber, block_or_tx)
        d.addErrback(self._handle_orphan, protocol)
        d.addErrback(self._handle_invalid)
        d.addErrback(self._handle_fatal)

    def _handle_orphan(self, failure, protocol):
        # Only handles OrphanException; anything else propagates to the next errback
        failure.trap(OrphanException)
        missing = failure.value.missing

        if failure.type is OrphanBlockException and missing not in self.blockchain.orphans:
            logger.debug("Requesting missing block: %s", missing.hex())
            protocol.send_block_request(missing.hex())

        elif failure.type is OrphanTransactionException and missing not in self.mempool.orphans:
            logger.debug("Requesting missing transaction: %s", missing.hex())
            protocol.send_transaction_request(missing.hex())

    @staticmethod
    def _handle_invalid(failure):
        # Invalid blocks/transactions are logged and dropped
        failure.trap(InvalidException)
        logger.debug("Invalid: %s", failure.value)

    @staticmethod
    def _handle_fatal(*args, **kwargs):
        # Last resort: anything that wasn't valid, invalid, or orphan
        log.err(*args, **kwargs)
        logger.critical("Critical error in network: incoming block/transaction was neither valid, invalid, or orphan!")
|
||||
|
||||
|
||||
def seeds(hostname="seed.aucoin.network") -> List[str]:
    """
    Get list of seed IP-addresses from DNS.

    :param hostname: The hostname used for lookup.
    :return: List of IP-addresses.
    """
    canonical_name, _aliases, addresses = socket.gethostbyname_ex(hostname)
    logger.debug("Seeds from %s: %s", canonical_name, addresses)
    return addresses
|
||||
|
||||
|
||||
def load_peers() -> List[str]:
    """
    Load peers from file.

    :return: List of peer IP-addresses; empty list when no file exists yet.
    """
    peers_path = data_dir.joinpath("peers.json")
    try:
        with open(peers_path, "r") as f:
            return json.load(f)
    except FileNotFoundError:
        # No saved peers yet; the caller falls back to DNS seeds
        return []
|
||||
|
||||
|
||||
def save_peers(peers):
    """
    Save peers to file.

    :param peers: Iterable of peer IP-addresses to save.
    """
    peers_path = data_dir.joinpath("peers.json")
    with open(peers_path, "w") as f:
        json.dump(list(peers), f)
|
||||
|
||||
|
||||
def local_ip():
    """
    Returns local ip, e.g. 192.168.x.x.
    Based on https://stackoverflow.com/a/25850698.

    :return: The local (LAN) IP-address as a string.
    """
    # connect() for UDP doesn't send packets. 192.0.2.0 invalid according to RFC 5735.
    # Use the socket as a context manager so the descriptor is always closed
    # (the original leaked the socket).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("192.0.2.0", 1))
        return s.getsockname()[0]
|
78
aucoin/statistic.py
Normal file
78
aucoin/statistic.py
Normal file
|
@ -0,0 +1,78 @@
|
|||
import json
|
||||
import logging.config
|
||||
from datetime import datetime
|
||||
|
||||
from aucoin import config
|
||||
from aucoin import util
|
||||
from aucoin.database import session_scope
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StatisticsLogger(object):
    """
    Appends a JSON snapshot of node statistics to statistics/stats.json
    (one JSON object per line) whenever a new header block is observed.
    """

    def __init__(self, core, blockchain, mempool, miner, network, wallet):
        """
        :param core: Core instance; its lock guards stat collection.
        :param blockchain: Blockchain instance queried for header/size stats.
        :param mempool: Mempool instance queried for pending-transaction stats.
        :param miner: Miner instance queried for performance/worker stats.
        :param network: Network instance queried for peer count.
        :param wallet: Wallet instance queried for balances.
        """
        self.core = core
        self.blockchain = blockchain
        self.mempool = mempool
        self.miner = miner
        self.network = network
        self.wallet = wallet

        logger.info("Statistics logger enabled")
        # Ensure the statistics output directory exists before first write
        util.make_data_dirs("statistics")

    def new_header_block(self, header_block=None):
        # Append one snapshot per line (JSON Lines format); header_block itself
        # is unused -- the snapshot re-reads state from the components
        with open(config.data_dir.joinpath("statistics/stats.json"), "a") as file:
            file.write(json.dumps(self.generate_stat()))
            file.write("\n")

    def generate_stat(self):
        """
        Collect a consistent snapshot of blockchain/mempool/miner/network/wallet
        statistics while holding the core lock.

        :return: Dict with a UTC timestamp and the collected statistics.
        """
        with session_scope() as session, self.core.lock:
            header = self.blockchain.header(session)
            balance = self.wallet.balance

            try:
                header_timespan = header.timestamp - self.blockchain.block(header.hash_prev_block, session).timestamp
            except AttributeError:  # if header is genesis
                header_timespan = 0

            return {
                "utc": int(datetime.utcnow().timestamp()),
                "data": {
                    "blockchain": {
                        "header": {
                            "hash": header.hash.hex(),
                            "height": header.height,
                            "difficulty": header.difficulty,
                            "timestamp": header.timestamp,
                            "timespan": header_timespan
                        },
                        "number_of_blocks": {
                            "main_branch": self.blockchain.number_of_blocks(session, main_branch_only=True),
                            "total": self.blockchain.number_of_blocks(session, main_branch_only=False)
                        },
                        "average_block_timespan": {
                            "last_100": self.blockchain.average_block_timespan(session, n=100),
                            "since_genesis": self.blockchain.average_block_timespan(session)
                        },
                        "number_of_orphans": len(self.blockchain.orphans),
                        "size": self.blockchain.size
                    },
                    "mempool": {
                        "number_of_transactions": len(self.mempool),
                        # NOTE(review): key below is misspelled ("orhpans"); kept
                        # as-is because downstream consumers may already parse it
                        "number_of_orhpans": len(self.mempool.orphans),
                        "size": self.mempool.size
                    },
                    "miner": {
                        "performance": self.miner.performance,
                        "number_of_workers": len(self.miner.workers)
                    },
                    "network": {
                        "number_of_peers": len(self.network.factory.peers)
                    },
                    "wallet": {
                        "balance_confirmed": balance.confirmed,
                        "balance_unconfirmed": balance.unconfirmed
                    }
                }
            }
|
424
aucoin/transactions.py
Normal file
424
aucoin/transactions.py
Normal file
|
@ -0,0 +1,424 @@
|
|||
import json
|
||||
from copy import deepcopy
|
||||
from typing import Iterator, Tuple
|
||||
|
||||
from sqlalchemy import Column, Integer, Boolean, LargeBinary, ForeignKey
|
||||
from sqlalchemy.ext.orderinglist import ordering_list
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from aucoin import consensus
|
||||
from aucoin import database
|
||||
from aucoin import util
|
||||
|
||||
|
||||
class Input(database.DBBase):
    """
    A transaction input: a reference to a previous transaction's output
    (by transaction hash and output index) plus the signature and public key
    that authorize spending it.
    """

    __tablename__ = "inputs"

    # Directly mapped to python attributes:
    prev_tx_hash = Column(LargeBinary)  # hash of the transaction containing the spent output
    txout_index = Column(Integer)       # index of that output within its transaction
    signature = Column(LargeBinary)
    public_key = Column(LargeBinary)

    # SQLAlchemy-internal stuff:
    _id = Column(Integer, primary_key=True, autoincrement=True)
    _index = Column(Integer)            # position in the owning transaction's input list
    _coinbase = Column(Boolean, default=False)  # polymorphic discriminator (see __mapper_args__)

    # Relationships:
    transaction_id = Column(Integer, ForeignKey("transactions._id"))
    transaction = relationship("Transaction", back_populates="inputs")

    # Single-table inheritance: CoinbaseInput rows carry _coinbase=True
    __mapper_args__ = {
        "polymorphic_on": _coinbase,
        "polymorphic_identity": False
    }

    def __init__(self, prev_tx_hash: bytes, txout_index: int, signature: bytes = b"", public_key: bytes = b""):
        """
        When creating an input for our own transaction, signature and public_key need not be provided (they are set by
        the wallet when signing). They do however need to be provided when receiving transaction from the network.
        """
        self.prev_tx_hash = prev_tx_hash
        self.txout_index = txout_index
        self.signature = signature
        self.public_key = public_key

    def __bytes__(self):
        # Canonical byte serialization used for hashing; txout_index is
        # fixed-width 4-byte big-endian
        return self.prev_tx_hash + \
               self.txout_index.to_bytes(4, "big") + \
               self.signature + \
               self.public_key

    @property
    def hash(self) -> bytes:
        # Hash of the canonical serialization (algorithm defined in util.hash)
        return util.hash(bytes(self))

    @property
    def size(self) -> int:
        """
        :return: Input size in bytes.
        """
        return len(bytes(self))

    @property
    def raw(self) -> dict:
        # JSON-friendly representation; binary fields hex-encoded
        return {
            "prev_tx_hash": self.prev_tx_hash.hex(),
            "txout_index": self.txout_index,
            "signature": self.signature.hex(),
            "public_key": self.public_key.hex()
        }

    @staticmethod
    def from_raw(prev_tx_hash=None, txout_index=None, signature=None, public_key=None, hash=None):
        # Inverse of .raw; `hash` is accepted so raw dicts can be **-splatted,
        # but its value is ignored -- the hash is always recomputed
        return Input(
            prev_tx_hash=bytes.fromhex(prev_tx_hash),
            txout_index=txout_index,
            signature=bytes.fromhex(signature),
            public_key=bytes.fromhex(public_key)
        )

    def json(self, indent=None) -> str:
        return json.dumps(self.raw, indent=indent)

    def __str__(self, *args, **kwargs):
        return f"Input({self.json(indent=4)})"
|
||||
|
||||
|
||||
class CoinbaseInput(Input):
    """
    The single input of a coinbase transaction. Spends nothing; instead its
    signature/public_key columns are repurposed to carry block_height and
    arbitrary coinbase data (see __init__ for the rationale).
    """

    __mapper_args__ = {
        "polymorphic_identity": True
    }

    def __init__(self, block_height: int, coinbase: bytes):
        """
        The CoinbaseInput has a "block_height" and "coinbase" instead of "signature" & "public_key".

        In Bitcoin block_height and coinbase is actually added to the scriptSig part of the transaction input. But
        because we have simplified scriptSig away in favour of separate fields for signature and public_key (what would
        normal be in scriptSig) we're gonna have to have separate fields for block_height and coinbase as well. Those
        will, however, still actually be mapped to the 'signature' and 'public_key' fields behind the scenes.

        Based on:
        https://bitcoin.org/en/developer-reference#coinbase
        https://en.bitcoin.it/wiki/Transaction#Generation
        https://bitcoin.stackexchange.com/questions/30764/why-block-height-is-required-in-coinbase

        :param block_height: Height of the block that contains this CoinbaseInput. Genesis_block has height=0, and it is
        incremented for each block.

        Block height is an attribute of the CoinbaseInput instead of the block itself, because if it wasn't it would be
        easy to create two CoinbaseInputs (and therefore two CoinbaseTransactions) with the same hash. This is a major
        problem because UTXOs are referred to first by their containing transaction's id (hash) and then by their index.
        By mining two blocks with identical CoinbaseTransactions (legal) it would then be possible to "overwrite" an
        older CoinbaseTransaction, rendering it unspendable. By including the height in the CoinbaseTransaction,
        manufacturing an identical hash becomes a problem of breaking the hash function (SHA256).

        :param coinbase: Can contain any arbitrary data (up to 100 bytes); it isn't used.
        """
        # Note: deliberately does not call Input.__init__ -- the mapped fields
        # are assigned directly (signature/public_key via the properties below)
        self.prev_tx_hash = bytes(32)  # 0x00...00
        self.txout_index = 0  # is n=-1 for CoinbaseInputs in Bitcoin et al but we'd rather avoid signed bytes
        self.block_height = block_height
        self.coinbase = coinbase

    @property
    def coinbase(self) -> bytes:
        # coinbase data is stored in the inherited public_key column
        return self.public_key

    @coinbase.setter
    def coinbase(self, value):
        self.public_key = value

    @property
    def block_height(self) -> int:
        # block height is stored big-endian in the inherited signature column
        return int.from_bytes(self.signature, "big")

    @block_height.setter
    def block_height(self, value: int):
        # 8 bytes fixed width (matches the decoding in the getter)
        self.signature = value.to_bytes(8, "big")

    @property
    def raw(self):
        # Same shape as Input.raw, but exposes the repurposed fields under
        # their coinbase names
        return {
            "prev_tx_hash": self.prev_tx_hash.hex(),
            "txout_index": self.txout_index,
            "block_height": self.block_height,
            "coinbase": self.coinbase.hex()
        }

    @staticmethod
    def from_raw(prev_tx_hash=None, txout_index=None, block_height=None, coinbase=None, hash=None):
        # prev_tx_hash/txout_index/hash are accepted for **-splatting but
        # ignored: the constructor always sets the fixed coinbase values
        return CoinbaseInput(
            block_height=block_height,
            coinbase=bytes.fromhex(coinbase)
        )

    def __str__(self, *args, **kwargs):
        return f"CoinbaseInput({self.json(indent=4)})"
|
||||
|
||||
|
||||
class Output(database.DBBase):
    """
    A transaction output: an amount of auc assigned to a recipient address.
    Becomes spendable by an Input referencing its transaction hash and index.
    """

    __tablename__ = "outputs"

    # Directly mapped to python attributes:
    value = Column(Integer)         # amount of auc assigned to the address
    address = Column(LargeBinary)   # recipient address (util.address of a public key)

    # SQLAlchemy-internal stuff:
    _id = Column(Integer, primary_key=True, autoincrement=True)
    _index = Column(Integer)        # position in the owning transaction's output list
    _spent = Column(Boolean, default=False)  # set once referenced by a confirmed input

    # Relationships:
    transaction_id = Column(Integer, ForeignKey("transactions._id"))
    transaction = relationship("Transaction", back_populates="outputs")

    def __init__(self, value: int, address: bytes):
        """
        A transaction "spends" all (the money of) the inputs and assigns (/sends) it to the addresses in the outputs.

        :param value: Amount of auc to send to the address.
        :param address: util.address() of a public key.
        """
        self.value = value
        self.address = address

    def __bytes__(self):
        # Canonical byte serialization; value is fixed-width 8-byte big-endian
        return self.value.to_bytes(8, "big") + \
               self.address

    @property
    def hash(self) -> bytes:
        # Hash of the canonical serialization
        return util.hash(bytes(self))

    @property
    def size(self) -> int:
        """
        :return: Output size in bytes.
        """
        return len(bytes(self))

    @property
    def raw(self) -> dict:
        # JSON-friendly representation; address hex-encoded
        return {
            "value": self.value,
            "address": self.address.hex()
        }

    @staticmethod
    def from_raw(value=None, address=None, hash=None):
        # Inverse of .raw; `hash` accepted for **-splatting but ignored
        return Output(
            value=value,
            address=bytes.fromhex(address)
        )

    def json(self, indent=None) -> str:
        return json.dumps(self.raw, indent=indent)

    def __str__(self, *args, **kwargs):
        return f"Output({self.json(indent=4)})"
|
||||
|
||||
|
||||
class Transaction(database.DBBase):
|
||||
__tablename__ = "transactions"
|
||||
|
||||
# Directly mapped to python attributes:
|
||||
version = Column(Integer)
|
||||
|
||||
# SQLAlchemy-internal stuff:
|
||||
_id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
_hash = Column(LargeBinary)
|
||||
_index = Column(Integer)
|
||||
_coinbase = Column(Boolean, default=False)
|
||||
|
||||
# Relationships:
|
||||
block_id = Column(Integer, ForeignKey("blocks._id"))
|
||||
block = relationship("Block", back_populates="transactions")
|
||||
inputs = relationship("Input",
|
||||
back_populates="transaction",
|
||||
order_by="Input._index",
|
||||
collection_class=ordering_list("_index"))
|
||||
outputs = relationship("Output",
|
||||
back_populates="transaction",
|
||||
order_by="Output._index",
|
||||
collection_class=ordering_list("_index"))
|
||||
|
||||
__mapper_args__ = {
|
||||
"polymorphic_on": _coinbase,
|
||||
"polymorphic_identity": False
|
||||
}
|
||||
|
||||
def __init__(self, version=1, inputs=None, outputs=None):
|
||||
"""
|
||||
A transaction which transfers (all) auc from the inputs to the outputs.
|
||||
|
||||
:param version: Indicates which validations rules to follow. Used for backwards-compatibility breaking updates to the protocol.
|
||||
:param inputs: List of Inputs.
|
||||
:param outputs: List of Outputs.
|
||||
"""
|
||||
self.version = version
|
||||
self.inputs = inputs
|
||||
self.outputs = outputs
|
||||
|
||||
# Synchronize ordering for ordering_list collections
|
||||
self.inputs.reorder()
|
||||
self.outputs.reorder()
|
||||
|
||||
def __bytes__(self):
|
||||
return self.version.to_bytes(4, "big") + \
|
||||
b"".join(bytes(input) for input in self.inputs) + \
|
||||
b"".join(bytes(output) for output in self.outputs)
|
||||
|
||||
@property
|
||||
def hash(self) -> bytes:
|
||||
"""
|
||||
Based on:
|
||||
https://bitcoin.stackexchange.com/questions/2859/how-are-transaction-hashes-calculated
|
||||
"""
|
||||
return util.hash(bytes(self))
|
||||
|
||||
@property
|
||||
def size(self) -> int:
|
||||
"""
|
||||
:return: Size of transaction in bytes.
|
||||
"""
|
||||
return len(bytes(self))
|
||||
|
||||
def fee(self, blockchain, mempool, session) -> int:
|
||||
"""
|
||||
The fee this transaction will pay the miner.
|
||||
|
||||
:param blockchain: The blockchain is required to look up the unspent transactions outputs for the input-values.
|
||||
:param mempool: The mempool, used like the blockchain.
|
||||
:param session: Database session.
|
||||
:return: The fee.
|
||||
"""
|
||||
sum_inputs = sum((blockchain.txo(input.prev_tx_hash, input.txout_index, session) or
|
||||
mempool.txo(input.prev_tx_hash, input.txout_index)).value
|
||||
for input in self.inputs)
|
||||
sum_outputs = sum(output.value for output in self.outputs)
|
||||
return sum_inputs - sum_outputs
|
||||
|
||||
def truncated_copies(self, blockchain, mempool, session) -> Iterator[Tuple[Input, bytes]]:
|
||||
"""
|
||||
For use in self.sign() & Validator.validate_block()
|
||||
|
||||
Based on:
|
||||
https://en.bitcoin.it/wiki/OP_CHECKSIG
|
||||
https://en.bitcoin.it/w/images/en/7/70/Bitcoin_OpCheckSig_InDetail.png
|
||||
http://davidederosa.com/basic-blockchain-programming/the-first-transaction-part-one/
|
||||
|
||||
:return: Generator containing pairs of (input, truncated_transaction hash)
|
||||
"""
|
||||
# For each input, the message to be signed/verified is a slightly modified version of the transaction
|
||||
for index, input in enumerate(self.inputs):
|
||||
# Copy transaction (to be modified)
|
||||
copy = deepcopy(self)
|
||||
|
||||
for c_index, c_input in enumerate(copy.inputs):
|
||||
# Truncate signature & public_key for all inputs
|
||||
c_input.signature = b""
|
||||
c_input.public_key = b""
|
||||
|
||||
# Set public_key (~subscript) of the input we are currently signing/verifying to the recipient-address
|
||||
# of the (unspent) output it refers to.
|
||||
if c_index == index:
|
||||
output = blockchain.txo(input.prev_tx_hash, input.txout_index, session) or \
|
||||
mempool.txo(input.prev_tx_hash, input.txout_index)
|
||||
c_input.public_key = output.address
|
||||
|
||||
# The resulting transaction (copy) is hashed, and the hash itself is the "message" that is signed/verified
|
||||
yield input, copy.hash
|
||||
|
||||
@property
def raw(self) -> dict:
    """Plain-dict form of this transaction (hex hash, version, raw inputs/outputs) for serialisation."""
    serialised = {
        "hash": self.hash.hex(),
        "version": self.version,
    }
    serialised["inputs"] = [txin.raw for txin in self.inputs]
    serialised["outputs"] = [txout.raw for txout in self.outputs]
    return serialised
|
||||
|
||||
@staticmethod
def from_raw(version=None, inputs=None, outputs=None, hash=None):
    """
    Rebuild a Transaction from its raw-dict fields (inverse of the ``raw`` property).
    The ``hash`` argument is accepted for symmetry with ``raw`` but ignored — the hash
    is derived from the reconstructed contents.
    """
    parsed_inputs = [Input.from_raw(**raw_input) for raw_input in inputs]
    parsed_outputs = [Output.from_raw(**raw_output) for raw_output in outputs]
    return Transaction(version=version, inputs=parsed_inputs, outputs=parsed_outputs)
|
||||
|
||||
def json(self, indent=None) -> str:
    """Serialise this transaction's raw form to a JSON string; *indent* enables pretty-printing."""
    raw_form = self.raw
    return json.dumps(raw_form, indent=indent)
|
||||
|
||||
def __str__(self, *args, **kwargs):
    """Readable representation: class name wrapping the pretty-printed JSON body."""
    return "Transaction(" + self.json(indent=4) + ")"
|
||||
|
||||
|
||||
class CoinbaseTransaction(Transaction):
    # SQLAlchemy mapper configuration: rows whose discriminator column is truthy are
    # loaded as CoinbaseTransaction (single-table inheritance from Transaction).
    __mapper_args__ = {
        "polymorphic_identity": True
    }

    def __init__(self, address: bytes, value: int = consensus.block_reward, block_height: int = 0, coinbase=b"",
                 version=1):
        """
        A Coinbase Transaction which sends generation + fees to one address.

        :param address: The wallet address who should receive the generated coins.
        :param value: The amount of coins sent to address. Coinbase transactions always contain outputs totalling the
            sum of the block reward plus all transaction fees collected from the other transactions in the same block.
            NOTE(review): the default is evaluated once at import time; if consensus.block_reward
            ever becomes dynamic this default will be stale — confirm it is a constant.
        :param block_height: The height of the block this coinbase transaction is contained in.
            Why does the coinbase contain block height?
            Discussion in the CoinbaseInput-object.
            https://bitcoin.stackexchange.com/questions/30764
            https://bitcoin.stackexchange.com/questions/26910
            https://bitcoin.stackexchange.com/questions/5903
        :param coinbase: Can contain any arbitrary data (up to 100 bytes); it isn't used.
        :param version: Transaction format version.
        """
        super().__init__(
            version=version,

            # coinbase transactions have exactly one input.
            inputs=[
                CoinbaseInput(
                    block_height=block_height,
                    coinbase=coinbase
                )
            ],

            # Technically, the coinbase transaction can split the reward amongst multiple addresses (i.e. have multiple
            # outputs) just like any other transaction, but we disregard this for simplicity.
            outputs=[
                Output(
                    value=value,
                    address=address
                )
            ]
        )

    @property
    def block_height(self) -> int:
        # Height lives on the single CoinbaseInput created in __init__.
        return self.inputs[0].block_height

    @property
    def coinbase(self) -> bytes:
        # Arbitrary coinbase data lives on the single CoinbaseInput created in __init__.
        return self.inputs[0].coinbase

    def fee(self, blockchain, mempool, session) -> int:
        # A coinbase transaction spends no prior outputs, so it pays no fee.
        # Overrides Transaction.fee, which would try to look up referenced outputs.
        return 0

    @staticmethod
    def from_raw(version=None, inputs=None, outputs=None, hash=None):
        """
        Rebuild a CoinbaseTransaction from raw-dict fields.
        Only the first input/output is used (a coinbase has exactly one of each);
        ``hash`` is ignored — it is recomputed from the contents.
        """
        input = CoinbaseInput.from_raw(**inputs[0])
        output = Output.from_raw(**outputs[0])
        return CoinbaseTransaction(
            address=output.address,
            value=output.value,
            block_height=input.block_height,
            coinbase=input.coinbase,
            version=version
        )

    def __str__(self, *args, **kwargs):
        return f"CoinbaseTransaction({self.json(indent=4)})"
|
109
aucoin/util.py
Normal file
109
aucoin/util.py
Normal file
|
@ -0,0 +1,109 @@
|
|||
import hashlib
|
||||
import inspect
|
||||
import logging
|
||||
from itertools import zip_longest
|
||||
from pathlib import Path
|
||||
|
||||
from aucoin import config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def hash(data) -> bytes:
    """
    Takes an arbitrary block of byte-data and calculates a fixed-size bit string (a digest).

    :param data: Bytes to calculate hash for.
    :return: 32-byte SHA-256 digest of data.
    """
    return hashlib.sha256(data).digest()
|
||||
|
||||
|
||||
def address(public_key) -> bytes:
    """
    Takes a public key and returns the hash of it.. but might do something differently in the future.

    Kept as a separate function so the address-derivation scheme can change without
    touching callers.

    :param public_key: Serialized public key bytes.
    :return: The wallet address derived from public_key (currently its SHA-256 digest).
    """
    return hash(public_key)
|
||||
|
||||
|
||||
def grouper(iterable, n, fillvalue=None):
    """
    Collect data into fixed-length chunks or blocks.
    Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    From: https://docs.python.org/3/library/itertools.html#itertools-recipes

    :param iterable: Iterable to split.
    :param n: Length of chunks.
    :param fillvalue: Value to use in case last chunk cannot be filled.
    :return: Iterable split into chunks of size n, with last chunk padded with fillvalue if necessary.
    """
    # n references to the SAME iterator: zip_longest pulls from it round-robin,
    # which slices the stream into consecutive n-sized groups.
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
|
||||
|
||||
|
||||
def merkle_root_hash(ls) -> bytes:
    """
    A merkle tree is constructed by pairing each element of ls with one other element and then hashing them together.
    If ls contains an odd number of elements, the last element is duplicated (i.e. it is hashed with itself).
    The procedure is repeated recursively until one hash remains - the merkle root.

    :param ls: Non-empty list of elements to calculate Merkle root of.
    :return: Merkle root of ls.
    :raises ValueError: If ls is empty. (Previously an empty list recursed forever:
        grouper([]) yields nothing, so merkle_root_hash([]) called itself with []
        until RecursionError.)
    """
    if not ls:
        raise ValueError("cannot compute the Merkle root of an empty list")

    if len(ls) == 1:
        return hash(ls[0])

    # Pair up elements (duplicating the last one on odd counts) and recurse on the
    # next level of the tree.
    return merkle_root_hash([hash(left) + hash(right) for left, right in grouper(ls, 2, fillvalue=ls[-1])])
|
||||
|
||||
|
||||
def humanize(n, precision=2, prefix="bin", suffix="B") -> str:
    """
    Return a humanized string representation of a number (of bytes).
    Adapted from Doug Latornell - http://code.activestate.com/recipes/577081/

    :param n: The number to format.
    :param precision: Number of decimal places in the output.
    :param prefix: "bin" for 1024-based units (kiB, MiB, ...) or "dec" for 1000-based units (kB, MB, ...).
    :param suffix: Base unit appended to each prefix, e.g. "B" for bytes.
    :return: Formatted string such as "1.50 kiB".
    """
    abbrevs = {
        "dec": [
            (1000 ** 5, 'P' + suffix),
            (1000 ** 4, 'T' + suffix),
            (1000 ** 3, 'G' + suffix),
            (1000 ** 2, 'M' + suffix),
            (1000 ** 1, 'k' + suffix),
            # BUGFIX: was (1000, suffix). Since 1000 == 1000**1, the loop could finish
            # without breaking for n < 1000, leaving factor == 1000 and e.g.
            # humanize(500, prefix="dec") == "0.50 B". The fallback must be factor 1,
            # matching the "bin" table below.
            (1, suffix)
        ],
        "bin": [
            (1 << 50, 'Pi' + suffix),
            (1 << 40, 'Ti' + suffix),
            (1 << 30, 'Gi' + suffix),
            (1 << 20, 'Mi' + suffix),
            (1 << 10, 'ki' + suffix),
            (1, suffix)
        ]
    }

    # Special-case to avoid "1.00 B" for exactly one unit.
    if n == 1:
        return "1 " + suffix

    # The loop deliberately rebinds `suffix` to the matched unit; the (1, suffix)
    # fallback guarantees a break for every n >= 1, so factor/suffix are always bound.
    for factor, suffix in abbrevs[prefix]:
        if n >= factor:
            break
    return '%.*f %s' % (precision, n / factor, suffix)
|
||||
|
||||
|
||||
def get_default_args(func):
    """
    Return a mapping of parameter name -> default value for *func*.
    Parameters without a default are omitted.
    Based on https://stackoverflow.com/a/12627202
    """
    defaults = {}
    for name, param in inspect.signature(func).parameters.items():
        if param.default is not inspect.Parameter.empty:
            defaults[name] = param.default
    return defaults
|
||||
|
||||
|
||||
def make_data_dirs(*paths):
    """
    Create each given path underneath config.data_dir (including missing parents).
    Directories that already exist are left untouched.
    """
    for relative in paths:
        target = config.data_dir.joinpath(relative)
        Path.mkdir(target, parents=True, exist_ok=True)
|
635
aucoin/validation.py
Normal file
635
aucoin/validation.py
Normal file
|
@ -0,0 +1,635 @@
|
|||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import serialization
|
||||
|
||||
from aucoin import consensus
|
||||
from aucoin import dsa
|
||||
from aucoin import util
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.exceptions import InvalidBlockException, InvalidTransactionException, InvalidException, OrphanException, \
|
||||
OrphanTransactionException, OrphanBlockException
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.network import Network
|
||||
from aucoin.transactions import CoinbaseTransaction, Transaction
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Validator(object):
|
||||
def __init__(self, core, blockchain: Blockchain, mempool: Mempool, network: Network):
|
||||
"""
|
||||
The validator object will usually be instantiated in the core and shared between all the sub-modules.
|
||||
The validator not only validates, but also adds blocks to the blockchain and incoming transactions to the
|
||||
mempool. It has this extra responsibility because the task of adding blocks to the blockchain is rather
|
||||
complicated; it may involve reorganising the entire chain if it turns out a side-branch should become the main-
|
||||
branch. Figuring this out requires a lot of checks, and as such we saw it fit to let the validator do this job.
|
||||
|
||||
:param core: The core object is needed to access its lock.
|
||||
:param blockchain: The blockchain where blocks will be added to and validated against.
|
||||
:param mempool: The pool of unconfirmed transactions to which incoming transactions will be added (if valid).
|
||||
:param network: The network used to request missing blocks/transactions.
|
||||
"""
|
||||
self.core = core
|
||||
self.blockchain = blockchain
|
||||
self.mempool = mempool
|
||||
self.network = network
|
||||
|
||||
self._new_block_subscribers = set()
|
||||
self._new_header_block_subscribers = set()
|
||||
self._new_transaction_subscribers = set()
|
||||
|
||||
def add_transaction(self, transaction: Transaction, reject_duplicate_orphan=True):
|
||||
"""
|
||||
Validate a standalone (not part of a block) transaction; add to the mempool if valid.
|
||||
|
||||
:param transaction: The transaction to validate.
|
||||
:param reject_duplicate_orphan: Should the transaction be rejected if it exists in orphans?
|
||||
"""
|
||||
with self.core.lock:
|
||||
logger.debug("Adding transaction %s", transaction)
|
||||
with session_scope() as session:
|
||||
self._add_transaction(transaction, session, reject_duplicate_orphan=reject_duplicate_orphan)
|
||||
|
||||
# Transaction successfully added; Transaction is valid! Let subscribers know
|
||||
self.notify_new_transaction(transaction)
|
||||
|
||||
# Process orphan transactions that uses this transaction as one of its inputs
|
||||
logger.debug("Processing orphan transactions that uses this transaction..")
|
||||
self.process_orphans(
|
||||
orphans=self.mempool.orphans,
|
||||
orphan_check=lambda orphan: any(transaction.hash == input.prev_tx_hash for input in orphan.inputs),
|
||||
add_function=self.add_transaction
|
||||
)
|
||||
|
||||
def add_block(self, block: Block, reject_duplicate_orphan=True):
|
||||
"""
|
||||
Validate a block; add to blockchain (not necessarily main branch) if valid. May reorganise parts of the
|
||||
blockchain and update its header depending on which branch the incoming block extends.
|
||||
|
||||
:param block: The block to validate.
|
||||
:param reject_duplicate_orphan: Should the block be rejected if it exists in orphans?
|
||||
"""
|
||||
with self.core.lock:
|
||||
logger.debug("Adding block %s", block)
|
||||
with session_scope() as session:
|
||||
old_header = self.blockchain.header(session).hash
|
||||
self._add_block(block, session, reject_duplicate_orphan=reject_duplicate_orphan)
|
||||
|
||||
# Block successfully added; Block is valid (but transactions might not be)! Let subscribers know
|
||||
self.notify_new_block(block)
|
||||
|
||||
# Did we get a new header? If so let subscribers know
|
||||
with session_scope() as session:
|
||||
header = self.blockchain.header(session)
|
||||
if header.hash != old_header:
|
||||
self.notify_new_header_block(header)
|
||||
|
||||
# Process orphan blocks that has this one as its hash_prev_block
|
||||
logger.debug("Processing orphan blocks that uses this block..")
|
||||
self.process_orphans(
|
||||
orphans=self.blockchain.orphans,
|
||||
orphan_check=lambda orphan: orphan.hash_prev_block == block.hash,
|
||||
add_function=self.add_block
|
||||
)
|
||||
|
||||
# Process orphan transactions that uses a transaction from this block as one of its inputs
|
||||
logger.debug("Processing orphan transactions that uses a transaction from this block..")
|
||||
block_transaction_hashes = set(transaction.hash for transaction in block.transactions)
|
||||
self.process_orphans(
|
||||
orphans=self.mempool.orphans,
|
||||
orphan_check=lambda orphan: any(input.prev_tx_hash in block_transaction_hashes for input in orphan.inputs),
|
||||
add_function=self.add_transaction
|
||||
)
|
||||
|
||||
def process_orphans(self, orphans: dict, orphan_check: callable, add_function: callable):
|
||||
"""
|
||||
Try to validate and add all orphans from the collection that pass the orphan_check. If any of the orphans still
|
||||
have missing dependencies request them from the network.
|
||||
|
||||
:param orphans: The collection of orphans (mempool.orphans or blockchain.orphans).
|
||||
:param orphan_check: The function used to determine if this orphan has been adopted.
|
||||
:param add_function: The function used to validate and add orphan (self.add_block or self.add_transaction).
|
||||
"""
|
||||
for hash, orphan in orphans.copy().items():
|
||||
if orphan_check(orphan):
|
||||
try:
|
||||
add_function(orphan, reject_duplicate_orphan=False)
|
||||
|
||||
except InvalidException as e:
|
||||
logger.debug(e)
|
||||
try:
|
||||
del orphans[hash]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
except OrphanBlockException as e:
|
||||
if e.missing not in orphans:
|
||||
logger.debug("Requesting missing block: %s", e.missing.hex())
|
||||
self.network.request_block(e.missing)
|
||||
|
||||
except OrphanTransactionException as e:
|
||||
if e.missing not in orphans:
|
||||
logger.debug("Requesting missing transaction: %s", e.missing.hex())
|
||||
self.network.request_transaction(e.missing)
|
||||
|
||||
logger.debug("Done processing orphans")
|
||||
|
||||
def _add_transaction(self, transaction: Transaction, session, reject_duplicate_orphan=True):
|
||||
"""
|
||||
Validate a standalone (not part of a block) transaction; add to the mempool if valid.
|
||||
|
||||
Based on:
|
||||
https://en.bitcoin.it/wiki/Protocol_rules#.22tx.22_messages
|
||||
validation.cpp:AcceptToMemoryPoolWorker() -- called by network
|
||||
|
||||
:param transaction: The transaction to validate and add to mempool.
|
||||
:param session: Database session.
|
||||
:param reject_duplicate_orphan: Should the transaction be rejected if it exists in orphans?
|
||||
"""
|
||||
# Perform non context-aware (i.e. "syntax") checks
|
||||
SyntaxChecker.check_transaction(transaction)
|
||||
|
||||
transaction_hash = transaction.hash # cache hash calculation
|
||||
|
||||
# Coinbase transactions are only valid in a block, not as standalone transactions
|
||||
if isinstance(transaction, CoinbaseTransaction):
|
||||
raise InvalidTransactionException("Standalone transaction cannot be coinbase")
|
||||
|
||||
# Reject if transaction already exists in blockchain
|
||||
if self.blockchain.transaction(transaction_hash, session):
|
||||
raise InvalidTransactionException("Transaction already exists in blockchain's main branch")
|
||||
|
||||
# Reject if transaction already exists in mempool
|
||||
# Note: this doesn't make the transaction invalid, it simply means we already have it in the pool
|
||||
if self.mempool.transaction(transaction_hash):
|
||||
raise InvalidTransactionException("Transaction already exists in mempool")
|
||||
|
||||
# Reject if transaction already exists in orphans if reject_duplicate_orphan is set
|
||||
if reject_duplicate_orphan and transaction_hash in self.mempool.orphans:
|
||||
raise InvalidBlockException("Already exists in orphans")
|
||||
|
||||
# Reject if transaction conflicts with any other transaction in the mempool: for each input, if the
|
||||
# referenced output is spent by (has same input as) another transaction in the mempool
|
||||
if self.mempool.conflicts(transaction):
|
||||
raise InvalidTransactionException("Transaction conflicts with mempool: one or more input's referenced "
|
||||
"output is spent by another transaction in the mempool")
|
||||
|
||||
# For each input, check that the referenced output transaction exists in either the blockchain or mempool.
|
||||
# If the output transaction is missing for any input this will be an orphan transaction.
|
||||
for input in transaction.inputs:
|
||||
if not (self.blockchain.transaction(input.prev_tx_hash, session)
|
||||
or self.mempool.transaction(input.prev_tx_hash)):
|
||||
self.mempool.orphans[transaction_hash] = transaction
|
||||
raise OrphanTransactionException(f"Referenced output transaction {input.prev_tx_hash.hex()} is missing",
|
||||
missing=input.prev_tx_hash)
|
||||
|
||||
# Validate transaction inputs
|
||||
self._validate_transaction_inputs(transaction, session, allow_mempool=True)
|
||||
|
||||
# Add transaction to mempool
|
||||
self.mempool[transaction_hash] = transaction
|
||||
|
||||
# Remove transaction from orphans
|
||||
try:
|
||||
del self.mempool.orphans[transaction_hash]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def _validate_transaction_inputs(self, transaction: Transaction, session, allow_mempool=False):
|
||||
"""
|
||||
Validate transaction inputs in the context of the blockchain's current main branch.
|
||||
|
||||
Based on:
|
||||
consensus/tx_verify.cpp:Consensus::CheckTxInputs()
|
||||
validation.cpp:CheckInputs()
|
||||
|
||||
:param transaction: The transaction for which to validate inputs.
|
||||
:param session: A database session.
|
||||
:param allow_mempool: Should we also check mempool when checking if the referenced outputs exist?
|
||||
"""
|
||||
# For each input, check that the referenced output exists in the UTXO set of the blockchain.
|
||||
# If allow_mempool is set, the requirement is relaxed to also allow the referenced output to exist in the
|
||||
# mempool in addition to the main branch.
|
||||
if not all(self.blockchain.utxo(input.prev_tx_hash, input.txout_index, session)
|
||||
or (allow_mempool and self.mempool.utxo(input.prev_tx_hash, input.txout_index))
|
||||
for input in transaction.inputs):
|
||||
raise InvalidTransactionException("Referenced output already spent (not in blockchain/mempool UTXO) "
|
||||
"for one or more inputs")
|
||||
|
||||
# Reject if the sum of input values < sum of output values (the potential difference is the transaction fee)
|
||||
# In other words, the fee must be positive.
|
||||
fee = transaction.fee(self.blockchain, self.mempool, session)
|
||||
if fee < 0:
|
||||
raise InvalidTransactionException("Sum of input values < sum of output values (negative fee)")
|
||||
|
||||
# Reject if transaction fee is too low
|
||||
if fee < consensus.tx_min_fee:
|
||||
raise InvalidTransactionException("Transaction fee too low")
|
||||
|
||||
# Check that the signature is valid for all inputs in the transaction
|
||||
if not all(dsa.verify(serialization.load_der_public_key(input.public_key, backend=default_backend()),
|
||||
copy_hash,
|
||||
input.signature)
|
||||
for input, copy_hash in transaction.truncated_copies(self.blockchain, self.mempool, session)):
|
||||
raise InvalidTransactionException("Invalid signature for one or more inputs")
|
||||
|
||||
# Check for each input, that the public key indeed matches the address of the output it is spending
|
||||
if not all((self.blockchain.utxo(input.prev_tx_hash, input.txout_index, session)
|
||||
or self.mempool.utxo(input.prev_tx_hash, input.txout_index)).address
|
||||
== util.address(input.public_key)
|
||||
for input in transaction.inputs):
|
||||
raise InvalidTransactionException("Public key doesn't match address of the output it is spending for one "
|
||||
"or more inputs")
|
||||
|
||||
def _add_block(self, block: Block, session, reject_duplicate_orphan=True):
|
||||
"""
|
||||
Validate a block; add to blockchain (not necessarily main branch) if valid. May reorganise parts of the
|
||||
blockchain and update its header depending on which branch the incoming block extends.
|
||||
|
||||
Based on:
|
||||
https://en.bitcoin.it/wiki/Protocol_rules#.22block.22_messages
|
||||
validation.cpp:ProcessNewBlock() -- called by network
|
||||
|
||||
CChainState::AcceptBlock()
|
||||
CChainState::AcceptBlockHeader()
|
||||
|
||||
validation.cpp:ContextualCheckBlockHeader()
|
||||
validation.cpp:ContextualCheckBlock()
|
||||
|
||||
:param block: The block to validate and add to the blockchain.
|
||||
:param session: Database session.
|
||||
:param reject_duplicate_orphan: Should the block be rejected if it exists in orphans?
|
||||
"""
|
||||
# Perform non context-aware (i.e. "syntax") checks. This also syntax-checks all the transactions
|
||||
SyntaxChecker.check_block(block)
|
||||
|
||||
block_hash = block.hash # cache hash calculation
|
||||
|
||||
# Reject if duplicate of block in blockchain regardless of branch
|
||||
# Note: this doesn't make the block invalid, it simply means we already know of it
|
||||
if self.blockchain.block(block_hash, session):
|
||||
raise InvalidBlockException("Already exists in blockchain")
|
||||
|
||||
# Reject if duplicate of block in orphans if reject_duplicate_orphan is set
|
||||
if reject_duplicate_orphan and block_hash in self.blockchain.orphans:
|
||||
raise InvalidBlockException("Already exists in orphans")
|
||||
|
||||
prev_block = self.blockchain.block(block.hash_prev_block, session)
|
||||
|
||||
# Check if prev block is in main branch or any side branches. If not, this will be an orphan block
|
||||
if not prev_block:
|
||||
self.blockchain.orphans[block_hash] = block
|
||||
raise OrphanBlockException(f"Referenced prev_block {block.hash_prev_block.hex()} is missing",
|
||||
missing=block.hash_prev_block)
|
||||
|
||||
# Check that target matches the consensus difficulty rules
|
||||
if not block.target == consensus.required_target(block, self.blockchain, session):
|
||||
raise InvalidBlockException("Target does not match the difficulty rules")
|
||||
|
||||
# Reject if timestamp is more than block_max_future_time in the future
|
||||
if datetime.fromtimestamp(block.timestamp) > datetime.utcnow() + consensus.block_max_future_time:
|
||||
raise InvalidBlockException("Block timestamp must not be more than block_max_future_time in the future")
|
||||
|
||||
# Reject if timestamp is equal to or less than the median time of the last n (as specified by consensus)
|
||||
if block.timestamp <= self.blockchain.median_timestamp(block.hash_prev_block, session):
|
||||
raise InvalidBlockException("Timestamp is before or equal to median time of the last n blocks")
|
||||
|
||||
# Check that block height is equal to previous block's height + 1
|
||||
if not block.height == prev_block.height + 1:
|
||||
raise InvalidBlockException("Block height is not equal to previous block's height + 1")
|
||||
|
||||
# Block is valid, but the transactions may not be!
|
||||
# Checks that depend on the UTXO set are done during an eventual blockchain reorganisation if the block is
|
||||
# added to the main branch. For example we can't calculate if the amount the miner pays himself in coinbase
|
||||
# is correct here since the transactions in this block (as well as their fees) may not exist in our current
|
||||
# main branch.
|
||||
|
||||
# Add to blockchain: block is always added as a side branch even though it might extend the main branch
|
||||
self.blockchain.add(block, session, main_branch=False)
|
||||
|
||||
# Remove block from orphans
|
||||
try:
|
||||
del self.blockchain.orphans[block_hash]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# Mark the best chain of the blockchain as the main branch
|
||||
self.make_best_chain_main_branch(session)
|
||||
|
||||
logger.debug("Make best chain main branch returned")
|
||||
|
||||
def make_best_chain_main_branch(self, session):
|
||||
"""
|
||||
Mark the best chain of the blockchain as the main branch. Called after add_block adds block as a side branch.
|
||||
There are three cases for the new block:
|
||||
- It extends the current main branch; this will mark the block as part of the main branch and remove its
|
||||
transactions from the mempool.
|
||||
- It extends a side branch, but does not add enough work to make it become the new main branch; in this case
|
||||
the best chain is already the main branch and there is nothing to do.
|
||||
- It extends a side branch, adding enough work to make it the new main branch; this case requires most work;
|
||||
first we need to find the fork block from which the side branch branches off of the main one, then roll
|
||||
back the main branch to this point, and then add each block from the side branch to the main one. Like
|
||||
case 1 of extending the main branch, we will need to remove matching transactions from the mempool after
|
||||
adding each block, furthermore, each transaction from the old main branch needs to be added to the mempool.
|
||||
|
||||
Based on:
|
||||
CChainState::ActivateBestChain()
|
||||
CChainState::ActivateBestChainStep()
|
||||
|
||||
CChainState::DisconnectTip()
|
||||
CChainState::DisconnectBlock()
|
||||
|
||||
:param session: Database session.
|
||||
"""
|
||||
disconnected_transactions = set()
|
||||
|
||||
header_block = self.blockchain.header(session)
|
||||
most_work_block = self.blockchain.block_most_work(session)
|
||||
|
||||
logger.debug("Header block: %s (height: %s)", header_block.hash.hex(), header_block.height)
|
||||
logger.debug("Most work block: %s (height: %s)", most_work_block.hash.hex(), most_work_block.height)
|
||||
|
||||
# Done if header is already the block with the most work
|
||||
if header_block == most_work_block:
|
||||
return
|
||||
|
||||
logger.debug("Header != most work block. REORG")
|
||||
|
||||
# Find the block from where the chain with most_work_block forks off the main branch.
|
||||
# Note: fork_block will be the current header_block if most_work_block builds upon the current main branch.
|
||||
fork_block = self.blockchain.find_fork(most_work_block.hash, session)
|
||||
|
||||
logger.debug("Fork block: %s (height: %s)", fork_block.hash.hex(), fork_block.height)
|
||||
|
||||
# Disconnect from the main branch all blocks from header_block down to fork_block.
|
||||
# Note: won't remove any blocks if most_work_block builds upon the current main branch
|
||||
while header_block != fork_block:
|
||||
self.disconnect_block(header_block, session)
|
||||
|
||||
# The block's transactions are saved so that they can be added back in to the mempool once we are done
|
||||
# reorganising. The coinbase transaction is not saved since it is only valid in the context of this block.
|
||||
disconnected_transactions.update(header_block.transactions[1:])
|
||||
|
||||
header_block = self.blockchain.header(session)
|
||||
|
||||
# Find the blocks to be added to the main branch (blocks from most_work_block down to fork_block).
|
||||
# Note: most_work_block will be the only one if it builds upon the main branch.
|
||||
blocks = []
|
||||
block = most_work_block
|
||||
while block != fork_block:
|
||||
blocks.append(block)
|
||||
block = self.blockchain.block(block.hash_prev_block, session)
|
||||
|
||||
# Unlike the blockchain, the mempool doesn't have a session which can be rolled back. Therefore, we make a
|
||||
# copy of the mempool to mess around with. If all blocks were connected successfully we override
|
||||
# self.mempool with this copy, otherwise we simply discard it.
|
||||
mempool = self.mempool.copy()
|
||||
|
||||
# Connect the blocks to the main branch
|
||||
for block in reversed(blocks):
|
||||
self.connect_block(block, mempool, session)
|
||||
|
||||
# Replace the real mempool with our temporary one
|
||||
self.mempool.clear()
|
||||
self.mempool.update(mempool)
|
||||
|
||||
# Add transactions from disconnected blocks back into the mempool
|
||||
for transaction in disconnected_transactions:
|
||||
try:
|
||||
self.add_transaction(transaction)
|
||||
except (InvalidTransactionException, OrphanException):
|
||||
pass # It's okay if some transactions are no longer valid given the new main branch; discard them.
|
||||
|
||||
def disconnect_block(self, block: Block, session):
|
||||
"""
|
||||
Disconnect current header block from the main branch, making its prev_block the new header.
|
||||
|
||||
:param block: The block to disconnect from the main branch. Warranty void if not current header.
|
||||
:param session: Database session.
|
||||
"""
|
||||
logger.debug("Disconnecting %s (height: %s)", block.hash.hex(), block.height)
|
||||
|
||||
# Mark the transaction's referenced outputs unspent in case the block is added again in the future
|
||||
for transaction in block.transactions[1:]:
|
||||
for input in transaction.inputs:
|
||||
output = self.blockchain.txo(input.prev_tx_hash, input.txout_index, session)
|
||||
output._spent = False
|
||||
|
||||
block._main_branch = False
|
||||
|
||||
# Set prev_block as new blockchain header
|
||||
prev_block = self.blockchain.block(block.hash_prev_block, session)
|
||||
self.blockchain.set_header(prev_block, session)
|
||||
|
||||
logger.debug("New header block is: %s (height: %s)", prev_block.hash.hex(), prev_block.height)
|
||||
|
||||
def connect_block(self, block: Block, mempool: Mempool, session):
|
||||
"""
|
||||
Validate and connect block to the main branch, making it the new header.
|
||||
|
||||
Based on:
|
||||
CChainState::ConnectTip()
|
||||
CChainState::ConnectBlock()
|
||||
validation.cpp:UpdateCoins()
|
||||
|
||||
:param block: The block to validate and connect to the main branch.
|
||||
:param mempool: The mempool (copy) from which we remove conflicting transactions.
|
||||
:param session: Database session.
|
||||
"""
|
||||
logger.debug("Connecting %s (height: %s)", block.hash.hex(), block.height)
|
||||
logger.debug("Connecting %s", block)
|
||||
|
||||
# Reject if any transaction's hash is duplicate of another transaction in the main branch
|
||||
if any(self.blockchain.transaction(transaction.hash, session)
|
||||
for transaction in block.transactions):
|
||||
raise InvalidBlockException("One or more transactions overwrite earlier transaction in main branch "
|
||||
"(duplicate hash)")
|
||||
|
||||
# Set block as main branch and blockchain's new header before validation transaction inputs. This is done
|
||||
# because an input can reference an output created in the same block. If validation fails it will be reverted.
|
||||
block._main_branch = True
|
||||
self.blockchain.set_header(block, session)
|
||||
|
||||
# Check that the coinbase output is less than or equal to the block reward plus sum of transaction fees
|
||||
coinbase = block.transactions[0]
|
||||
block_fee = sum(transaction.fee(self.blockchain, self.mempool, session) for transaction in block.transactions)
|
||||
if not sum(output.value for output in coinbase.outputs) <= consensus.block_reward + block_fee:
|
||||
raise InvalidBlockException("Coinbase output value too large")
|
||||
|
||||
for transaction in block.transactions[1:]:
|
||||
# Validate inputs; these are the transaction checks that depend on the context of the main branch
|
||||
self._validate_transaction_inputs(transaction, session)
|
||||
|
||||
# Mark referenced outputs spent
|
||||
for input in transaction.inputs:
|
||||
output = self.blockchain.utxo(input.prev_tx_hash, input.txout_index, session)
|
||||
output._spent = True
|
||||
|
||||
# Remove conflicting transactions from the mempool. A transaction is considered conflicting if any of its
|
||||
# referenced outputs are no longer unspent.
|
||||
for hash, transaction in mempool.copy().items():
|
||||
if not all(self.blockchain.utxo(input.prev_tx_hash, input.txout_index, session)
|
||||
for input in transaction.inputs):
|
||||
del mempool[hash]
|
||||
|
||||
# Subscribers and events
|
||||
|
||||
def subscribe_new_block(self, callback):
|
||||
self._new_block_subscribers.add(callback)
|
||||
|
||||
def notify_new_block(self, block):
|
||||
for subscriber in self._new_block_subscribers:
|
||||
subscriber(block)
|
||||
|
||||
def subscribe_new_header_block(self, callback):
|
||||
self._new_header_block_subscribers.add(callback)
|
||||
|
||||
def notify_new_header_block(self, block):
|
||||
for subscriber in self._new_header_block_subscribers:
|
||||
subscriber(block)
|
||||
|
||||
def subscribe_new_transaction(self, callback):
    """Register *callback* to be invoked whenever a new transaction is accepted."""
    self._new_transaction_subscribers.add(callback)
|
||||
|
||||
def notify_new_transaction(self, transaction):
    """
    Invoke every new-transaction subscriber with *transaction*.

    Iterates over a snapshot so a callback may safely (un)subscribe during
    notification without mutating the set being iterated.

    :param transaction: The newly accepted transaction.
    """
    for subscriber in tuple(self._new_transaction_subscribers):
        subscriber(transaction)
|
||||
|
||||
|
||||
class SyntaxChecker(object):
    """
    Stateless, context-free ("syntax") validation of transactions and blocks.

    Every check here depends only on the object itself — never on the
    blockchain or mempool. Contextual checks (UTXO existence, fees, etc.)
    are performed elsewhere by the validator.
    """

    @staticmethod
    def check_transaction(transaction: Transaction):
        """
        Basic check of a transaction which doesn't depend on context (blockchain/mempool).

        Based on:
            https://en.bitcoin.it/wiki/Protocol_rules#.22tx.22_messages
            consensus/tx_verify.cpp:CheckTransaction()

        :param transaction: The transaction to check.
        :raises InvalidTransactionException: If any syntax rule is violated.
        """
        # Input list must not be empty
        if not transaction.inputs:
            raise InvalidTransactionException("Input list is empty")

        # Output list must not be empty
        if not transaction.outputs:
            raise InvalidTransactionException("Output list is empty")

        # Each output value must be positive.
        # In Bitcoin the output value must be 0 <= value <= 21 million; zero-outputs are tolerated to allow storing data
        # in the blockchain somehow - we don't need that. Also, why do we need to put a maximum? If the input doesn't
        # contain a sufficient amount of coins, the output is invalid anyway.
        if not all(output.value > 0 for output in transaction.outputs):
            raise InvalidTransactionException("Negative or zero output value for one or more outputs")

        # Check that size in bytes <= MAX_BLOCK_SIZE
        if not transaction.size <= consensus.block_max_size:
            raise InvalidTransactionException("Transaction-size larger than max block size")

        # Reject if duplicate inputs (two inputs spending the same output)
        # Each input object is unique since its signature signs all the inputs in the transaction *except itself*, so we
        # can't simply check for duplicate input-objects.
        if len(transaction.inputs) != len(set((input.prev_tx_hash, input.txout_index) for input in transaction.inputs)):
            raise InvalidTransactionException("Transaction contains duplicate inputs")

        # Run coinbase-specific checks if transaction is coinbase
        if isinstance(transaction, CoinbaseTransaction):
            SyntaxChecker._check_coinbase_transaction(transaction)

        # Run non-coinbase checks if it isn't
        else:
            # Reject if any of the inputs have prev_tx_hash=0x00...00 (coinbase transactions)
            if any(input.prev_tx_hash == bytes(32) for input in transaction.inputs):
                raise InvalidTransactionException("prev_tx_hash is 0x00...00 for one or more inputs in non-coinbase tx")

    @staticmethod
    def _check_coinbase_transaction(transaction: CoinbaseTransaction):
        """
        Basic check of a coinbase transaction which doesn't depend on context (blockchain/mempool).
        WARNING: This check is INSUFFICIENT do NOT call this function manually - it is called from check_transaction().

        Based on:
            https://en.bitcoin.it/wiki/Protocol_rules#.22tx.22_messages
            https://en.bitcoin.it/wiki/Protocol_rules#.22block.22_messages
            consensus/tx_verify.cpp:CheckTransaction()
            primitives/transaction.h:IsCoinBase()

        :param transaction: The coinbase transaction to check.
        :raises InvalidTransactionException: If any coinbase-specific rule is violated.
        """
        # Check that there is only one input
        if not len(transaction.inputs) == 1:
            raise InvalidTransactionException("Coinbase transactions may only have one input")

        # Check that there is only one output
        if not len(transaction.outputs) == 1:
            raise InvalidTransactionException("Coinbase transactions may only have one output")

        input = transaction.inputs[0]

        # Check that the single input has prev_tx_hash=0x00...00
        if not input.prev_tx_hash == bytes(32):
            raise InvalidTransactionException("Coinbase transaction must have prev_tx_hash = 0x00...00")

        # .. and txout_index=0
        if not input.txout_index == 0:
            raise InvalidTransactionException("Coinbase transaction must have txout_index = 0")

        # Check that the size of the coinbase parameter doesn't exceed coinbase_max_size.
        if not len(input.coinbase) <= consensus.tx_coinbase_max_size:
            raise InvalidTransactionException("Size of coinbase parameter exceeds coinbase_max_size")

    @staticmethod
    def check_block(block: Block):
        """
        Basic check of a block which doesn't depend on context (blockchain/mempool).

        Based on:
            https://en.bitcoin.it/wiki/Protocol_rules#.22block.22_messages
            validation.cpp:CheckBlockHeader()
            pow.cpp:CheckProofOfWork()
            validation.cpp:CheckBlock()

        :param block: The block to check.
        :raises InvalidBlockException: If any block syntax rule is violated.
        """
        # Check block hash satisfies claimed target proof of work
        if not block.hash <= block.target:
            raise InvalidBlockException("Block hash doesn't satisfy claimed target proof of work")

        # Transaction list must be non-empty
        if not block.transactions:
            raise InvalidBlockException("Transaction list must be non-empty")

        # Check merkle root hash matches transactions list
        if block.merkle_root_hash != block.calculate_merkle(update=False):
            raise InvalidBlockException("Incorrect Merkle root hash")

        # Check that size in bytes <= MAX_BLOCK_SIZE
        if not block.size <= consensus.block_max_size:
            raise InvalidBlockException("Block-size larger than max block size")

        # First transaction must be coinbase..
        if not isinstance(block.transactions[0], CoinbaseTransaction):
            raise InvalidBlockException("The first transaction must be coinbase")

        # .. the rest must not be
        if any(isinstance(tx, CoinbaseTransaction) for tx in block.transactions[1:]):
            raise InvalidBlockException("Only the first transaction may be coinbase")

        # Check that the signature is valid
        # (the miner signs the truncated header with the key whose address receives the reward)
        if not dsa.verify(serialization.load_der_public_key(block.public_key, backend=default_backend()),
                          block.truncated_header,
                          block.signature):
            raise InvalidBlockException("Invalid block signature")

        # Check that the public key indeed matches the address of the coinbase
        if not block.transactions[0].outputs[0].address == util.address(block.public_key):
            raise InvalidBlockException("Public key doesn't match output address of the coinbase")

        # Check syntax of all transactions in block
        for transaction in block.transactions:
            SyntaxChecker.check_transaction(transaction)
|
314
aucoin/wallet.py
Normal file
314
aucoin/wallet.py
Normal file
|
@ -0,0 +1,314 @@
|
|||
import itertools
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
from collections import namedtuple
|
||||
from copy import copy
|
||||
from datetime import datetime
|
||||
from getpass import getpass
|
||||
from typing import KeysView
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import serialization
|
||||
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
|
||||
|
||||
from aucoin import config
|
||||
from aucoin import dsa
|
||||
from aucoin import util
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.dsa import Keypair
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.transactions import Transaction, Output, Input, CoinbaseTransaction
|
||||
|
||||
logger = logging.getLogger(__name__)

# (confirmed, unconfirmed): `confirmed` is the main-branch UTXO total;
# `unconfirmed` is the *change* the mempool would apply to it (may be negative).
Balance = namedtuple("Balance", ("confirmed", "unconfirmed"))
|
||||
|
||||
|
||||
class Wallet(object):
    """
    Holds the user's keypairs and builds, signs and tracks transactions.

    Keys and the sent-output history are persisted as JSON files in the data
    directory; private keys may optionally be encrypted with a password.
    """

    def __init__(self, blockchain: Blockchain, mempool: Mempool):
        """
        :param blockchain: Blockchain to query for confirmed UTXOs/transactions.
        :param mempool: Mempool to query for unconfirmed transactions.
        """
        self.blockchain = blockchain
        self.mempool = mempool

        self.keys = {}  # {address: Keypair(private, public), ...}
        self.sent_outputs = set()  # {(transaction_hash, amount, receiver_address), ...}
        self.password = None  # wallet encryption password (bytes) or None
        self._new_transaction_subscribers = set()

        # Load persisted keys/history; a missing file just means a fresh wallet.
        try:
            self.load_keys()
            self.load_sent_outputs()
        except FileNotFoundError:
            pass

    @property
    def addresses(self) -> KeysView:
        """
        :return: Collection of addresses this wallet controls the private keys for.
        """
        return self.keys.copy().keys()  # .copy() allows others to iterate addresses without worry

    @property
    def balance(self) -> Balance:
        """
        The sum of unspent transactions outputs for which this wallet has the private key.
        The confirmed balance consists of the value of the wallet's UTXOs in the blockchain's main branch.
        The unconfirmed balance consists of the value of the wallet's UTXOs in the mempool. This balance is returned as
        the _change_ to our balance after applying these transactions (e.g. -10 or +200).

        Information regarding this:
            https://bitcoin.stackexchange.com/questions/22997/how-is-a-wallets-balance-computed
            https://bitcoin.stackexchange.com/questions/67357/what-is-the-big-o-complexity-to-retrieve-the-balance-for-a-user/

        :return: Balance(confirmed, unconfirmed) named-tuple.
        """
        with session_scope() as session:
            confirmed = sum(output.value for output in self.blockchain.utxos_of_addresses(self.addresses, session))

            # Find the value of all outputs sent *to* one of our addresses
            unconfirmed_incoming = sum(output.value for output in self.mempool.txos_of_addresses(self.addresses))

            # Find the value of all outputs sent *from* one of our addresses
            unconfirmed_outgoing = 0
            for transaction in self.mempool.values():
                for input in transaction.inputs:
                    # The referenced output may be confirmed (blockchain) or itself unconfirmed (mempool).
                    output = self.blockchain.txo(input.prev_tx_hash, input.txout_index, session) or \
                             self.mempool.txo(input.prev_tx_hash, input.txout_index)
                    if output.address in self.addresses:
                        unconfirmed_outgoing += output.value

            return Balance(confirmed, unconfirmed_incoming - unconfirmed_outgoing)

    def history(self, limit: int = None) -> list:
        """
        :param limit: Number of history entries to return.
        :return: List of incoming/outgoing transactions as well as mined blocks.
        """
        history = []

        with session_scope() as session:
            header = self.blockchain.header(session)

            # Received coins can be calculated based on the state of the blockchain/mempool
            for txo in itertools.chain(self.blockchain.txos_of_addresses(self.addresses, session, limit=limit),
                                       self.mempool.txos_of_addresses(self.addresses)):
                # Skip if change
                # NOTE(review): `1 - txo._index` assumes at most two outputs
                # (payment + change) per transaction — TODO confirm.
                try:
                    other = txo.transaction.outputs[1 - txo._index]
                    if (txo.transaction.hash, other.value, other.address) in self.sent_outputs:
                        continue
                except IndexError:
                    pass  # There may be no change; this is fine

                row = {
                    "Date": "In Mempool",
                    "Type": "Mined" if isinstance(txo.transaction, CoinbaseTransaction) else "Received",
                    "Address": txo.address.hex(),
                    "Amount": txo.value,
                    "Fee": txo.transaction.fee(self.blockchain, self.mempool, session),
                    "Confirmations": 0
                }

                # If transaction is in blockchain, update row with additional information
                # (AttributeError means txo.transaction.block is None, i.e. still in mempool)
                try:
                    row.update({
                        "Date": datetime.fromtimestamp(txo.transaction.block.timestamp),
                        "Confirmations": (header.height - txo.transaction.block.height) + 1
                    })
                except AttributeError:
                    pass

                history.append(row)

            # Sent coins cannot be calculated because it is impossible to tell which output is change. Therefore,
            # whenever we send a transaction its hash, amount, and receiver's address is saved to the history file.
            for (tx_hash, amount, receiver_address) in self.sent_outputs:
                transaction = self.blockchain.transaction(tx_hash, session) or self.mempool.transaction(tx_hash)

                # Only include in history if transaction exists in mempool or blockchain.
                if not transaction:
                    continue

                row = {
                    "Date": "In Mempool",
                    "Type": "Sent",
                    "Address": receiver_address.hex(),
                    "Amount": amount,
                    "Fee": transaction.fee(self.blockchain, self.mempool, session),
                    "Confirmations": 0
                }

                # If transaction is in blockchain, update row with additional information
                try:
                    row.update({
                        "Date": datetime.fromtimestamp(transaction.block.timestamp),
                        "Confirmations": (header.height - transaction.block.height) + 1
                    })
                except AttributeError:
                    pass

                history.append(row)

        # Fewest confirmations (newest) first.
        return sorted(history, key=lambda r: r["Confirmations"])[:limit]

    def new_address(self) -> bytes:
        """
        Generate and return a new wallet address.
        """
        keypair = dsa.generate_keypair()
        address = util.address(public_bytes(keypair.public))

        self.keys[address] = keypair
        # Persist immediately so a generated key can never be lost.
        self.save_keys()

        return address

    def sign(self, transaction, session):
        """
        Sign the given transaction using the wallet keys according to Bitcoin's SIGHASH_ALL,

        :param transaction: The transaction to sign.
        :param session: Database session.
        """
        for input, copy_hash in transaction.truncated_copies(self.blockchain, self.mempool, session):
            # Find the output we are spending; we need to know which address it was sent to so we know which key to use
            output = self.blockchain.txo(input.prev_tx_hash, input.txout_index, session) or \
                     self.mempool.txo(input.prev_tx_hash, input.txout_index)

            keypair = self.keys[output.address]

            input.public_key = public_bytes(keypair.public)
            input.signature = dsa.sign(keypair.private, copy_hash)

    def make_transaction(self, receiver_address: bytes, amount: int, fee=0):
        """
        Construct a transaction based on the wallet's UTXOs and give it to the validator.
        :param receiver_address: Receiver's address.
        :param amount: Amount of coins to send.
        :param fee: The fee.
        """
        with session_scope() as session:
            # Use our confirmed UTXOs from the blockchain until we have enough. Use unconfirmed transactions from the
            # mempool if we don't have enough on the blockchain to cover the amount.
            spent = self.mempool.spent
            inputs = []
            input_sum = 0
            for utxo in itertools.chain(self.blockchain.utxos_of_addresses(self.addresses, session),
                                        self.mempool.utxos_of_addresses(self.addresses)):
                # Skip if UTXO is already spent by another transaction in the mempool. This check is redundant with
                # mempool.utxo[..] but it stops us from trying to spend the same UTXO from the blockchain over and over
                # when trying to send multiple transactions between new blocks.
                if (utxo.transaction.hash, utxo._index) in spent:
                    continue

                inputs.append(Input(prev_tx_hash=utxo.transaction.hash, txout_index=utxo._index))
                input_sum += utxo.value

                if input_sum >= amount + fee:
                    break
            else:
                # Loop exhausted all UTXOs without reaching amount + fee.
                print("You do not have enough balance to cover the transaction amount + fee")
                return

            # Construct transaction outputs; first with the output to the receiver
            outputs = [Output(amount, receiver_address)]

            # Outputs are always spent in their entirety; if the input_sum + fee is less than the amount, send excess
            # coins (the change) back to ourselves.
            change = input_sum - amount - fee
            if change:
                outputs.append(Output(change, self.new_address()))

            # Shuffle output list so no one knows which output is to the receiver and which is change
            random.shuffle(outputs)

            # Construct and sign transaction
            transaction = Transaction(inputs=inputs, outputs=outputs)
            self.sign(transaction, session)

            # Save to history amount of coins and to whom we sent
            self.sent_outputs.add((transaction.hash, amount, receiver_address))
            self.save_sent_outputs()

            self.notify_new_transaction(transaction)

    def encrypt(self):
        """
        Enable wallet encryption

        Prompts for a new password and re-saves the keys; an empty password
        disables encryption.
        """
        self.password = bytes(getpass("Please enter new wallet password: "), encoding="utf8")

        if not self.password:
            print("Wallet encryption removed.")

        self.save_keys()

    def save_keys(self):
        """
        Save wallet keys to file.
        """
        # Empty/None password means the key file is stored unencrypted.
        if self.password:
            encryption_algorithm = serialization.BestAvailableEncryption(self.password)
        else:
            encryption_algorithm = serialization.NoEncryption()

        # Serialize, convert and save private keys. Public keys and addresses are not saved as they can be derived from
        # the private key.
        serialized_private_keys = [
            keypair.private.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.PKCS8,
                encryption_algorithm=encryption_algorithm
            ).hex()
            for keypair in self.keys.copy().values()]

        with open(config.data_dir.joinpath("wallet.json"), "w") as file:
            json.dump(serialized_private_keys, file)

    def load_keys(self):
        """
        Load wallet keys from file.

        :raises FileNotFoundError: If no wallet file exists yet.
        """
        with open(config.data_dir.joinpath("wallet.json"), "r") as file:
            serialized_private_keys = json.load(file)

        # Keep prompting until the key file decrypts successfully.
        while True:
            try:
                self.keys = {}
                for serialized_private_key in serialized_private_keys:
                    private = serialization.load_pem_private_key(
                        bytes.fromhex(serialized_private_key),
                        password=self.password,
                        backend=default_backend()
                    )
                    # Derive the public key and address from the private key.
                    public = private.public_key()
                    address = util.address(public_bytes(public))
                    self.keys[address] = Keypair(private, public)
                return

            except (TypeError, ValueError) as e:
                # TypeError: password state doesn't match the file (e.g. encrypted
                # file but no password given). ValueError: wrong password.
                if e.__class__ is ValueError:
                    print("Sorry, try again.")
                self.password = bytes(getpass("Please enter wallet password: "), encoding="utf8")

    def save_sent_outputs(self):
        """Persist the sent-output history as JSON (hashes/addresses hex-encoded)."""
        with open(config.data_dir.joinpath("sent_outputs.json"), "w") as file:
            l = [(transaction_hash.hex(), amount, receiver_address.hex())
                 for (transaction_hash, amount, receiver_address) in self.sent_outputs]
            json.dump(l, file)

    def load_sent_outputs(self):
        """
        Load the sent-output history from JSON.

        :raises FileNotFoundError: If no history file exists yet.
        """
        with open(config.data_dir.joinpath("sent_outputs.json"), "r") as file:
            self.sent_outputs = {(bytes.fromhex(transaction_hash), amount, bytes.fromhex(receiver_address))
                                 for (transaction_hash, amount, receiver_address) in json.load(file)}

    def subscribe_new_transaction(self, callback):
        """Register *callback* to be invoked for every transaction this wallet creates."""
        self._new_transaction_subscribers.add(callback)

    def notify_new_transaction(self, transaction):
        """Invoke every new-transaction subscriber with *transaction*."""
        for subscriber in self._new_transaction_subscribers:
            subscriber(transaction)
|
||||
|
||||
|
||||
def public_bytes(public_key):
    """Return *public_key* serialized as DER-encoded SubjectPublicKeyInfo bytes."""
    encoding = Encoding.DER
    key_format = PublicFormat.SubjectPublicKeyInfo
    return public_key.public_bytes(encoding, key_format)
|
BIN
images/catchup.png
Normal file
BIN
images/catchup.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 30 KiB |
BIN
images/history.png
Normal file
BIN
images/history.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 137 KiB |
BIN
images/logo.png
Normal file
BIN
images/logo.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 434 KiB |
BIN
images/status.png
Normal file
BIN
images/status.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 68 KiB |
61
setup.py
Normal file
61
setup.py
Normal file
|
@ -0,0 +1,61 @@
|
|||
# Always prefer setuptools over distutils
|
||||
from setuptools import setup, find_packages
|
||||
# To use a consistent encoding
|
||||
from codecs import open
|
||||
from os import path
|
||||
|
||||
from aucoin import __author__, __version__, __licence__
|
||||
|
||||
|
||||
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()


setup(
    name="aucoin",
    version=__version__,
    description="A distributed cryptocurrency",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://aucoin.network",
    project_urls={
        "Source": "https://git.caspervk.net/caspervk/aucoin.git"
    },
    author=__author__,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: 3",
    ],
    python_requires=">=3.6",
    keywords="crypto currency cryptocurrency",
    license=__licence__,
    # Ship every package except the test suite.
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    # Bundle config files alongside the package code.
    package_data={
        "aucoin": [
            "*.ini"
        ]
    },
    install_requires=[
        "Click",
        "cryptography",
        "SQLAlchemy",
        "twisted",
        "tabulate"
    ],
    # Development-only extras: `pip install .[dev]`.
    extras_require={
        "dev": [
            "freezegun",
            "matplotlib"
        ]
    },
    # Expose the CLI as the `aucoin` command.
    entry_points={
        "console_scripts": [
            "aucoin = aucoin.main:main"
        ]
    },
)
|
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
21
tests/helpers.py
Normal file
21
tests/helpers.py
Normal file
|
@ -0,0 +1,21 @@
|
|||
from threading import RLock
|
||||
|
||||
from cryptography.hazmat.primitives.asymmetric import ec
|
||||
|
||||
from aucoin import dsa
|
||||
from aucoin.block import Block
|
||||
|
||||
|
||||
def mine(block: Block, private_key: ec.EllipticCurvePrivateKey):
    """
    Repeatedly sign *block* until its hash satisfies the target, then return it.

    Each dsa.sign call produces a fresh signature, so the block hash changes
    every iteration until one lands at or below the target.
    """
    while True:
        block.signature = dsa.sign(private_key, block.truncated_header)
        if block.hash <= block.target:
            return block
|
||||
|
||||
|
||||
class Core(object):
    """Minimal test double for the application core; exposes only its lock."""

    def __init__(self):
        # Re-entrant, so code under test may re-acquire from the same thread.
        self.lock = RLock()
|
306
tests/test_blockchain.py
Normal file
306
tests/test_blockchain.py
Normal file
|
@ -0,0 +1,306 @@
|
|||
import logging
|
||||
import unittest
|
||||
from copy import deepcopy, copy
|
||||
from statistics import median
|
||||
|
||||
import time
|
||||
from freezegun import freeze_time
|
||||
|
||||
from aucoin import consensus
|
||||
from aucoin import config
|
||||
from aucoin.transactions import Input, Transaction, CoinbaseTransaction, Output
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain, ExtendingBranchType
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.validation import InvalidBlockException, Validator
|
||||
from aucoin.wallet import Wallet
|
||||
from tests.helpers import mine
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestBlockchain(unittest.TestCase):
    """
    Integration-style tests for Blockchain: block storage, header/UTXO lookup,
    branch classification and main-branch reorganization.
    """

    @classmethod
    def setUpClass(cls):
        # set up blockchain
        # NOTE(review): Wallet() is called with no arguments below, unlike
        # aucoin.wallet.Wallet(blockchain, mempool) — this suite may predate
        # that signature; confirm before relying on it.
        cls.blockchain = Blockchain(clear=True)

    def setUp(self):
        # Start every test from an empty chain plus one mined block on top of genesis.
        self.blockchain._reset()

        # generate wallet
        self.wallet = Wallet()
        self.wallet.generate()

        # TODO: Setup utxo

        self.mempool = Mempool()

        # valid transaction used for testing
        self.tx = Transaction(
            inputs=[
                Input(
                    prev_tx_hash=b"tx_hash",
                    txout_index=2
                )
            ],
            outputs=[
                Output(
                    value=40,
                    address=self.wallet.address
                )
            ]
        )

        self.tx.inputs[0].signature = self.wallet.sign(self.tx.hash)

        self.coinbase = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=1
        )

        # Mine block #1 against an easy target so setUp stays fast.
        self.block = mine(
            Block(
                hash_prev_block=self.blockchain.genesis_block.hash,
                target=bytes.fromhex("00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
                transactions=[self.coinbase],
                branch_type="main_branch"
            )
        )

        self.validator = Validator(self.blockchain, self.blockchain.mempool)

        self.blockchain.add(self.block)

    def test_has_utxo_of_coinbase(self):
        # The mined coinbase output must appear as one of our UTXOs.
        utxos = self.blockchain.utxos_of_addresses([self.wallet.public_key])
        self.assertTrue(len(utxos) > 0)

    def test_doesnt_get_non_existing_block(self):
        # Unknown hashes resolve to None rather than raising.
        block = self.blockchain.block(b"nonexistent")
        self.assertEqual(block, None)

    def test_get_block(self):
        # Looking up by hash returns the block we stored.
        block = self.blockchain.block(self.block.hash)

        self.assertEqual(block.hash, self.block.hash)
        self.assertEqual(block.merkle_root_hash, self.block.merkle_root_hash)
        self.assertEqual(block.nonce, self.block.nonce)

    def test_get_header(self):
        # The single mined block should be the current chain header.
        block = self.blockchain.header
        self.assertEqual(block.hash, self.block.hash)
        self.assertEqual(block.merkle_root_hash, self.block.merkle_root_hash)
        self.assertEqual(block.nonce, self.block.nonce)

    def test_get_utxo_of_address(self):
        utxos = self.blockchain.utxos_of_address(self.wallet.public_key)
        self.assertEqual(1, len(utxos))

    @freeze_time("2010-01-01")
    def test_calculates_median_of_timestamps_of_blocks(self):
        timestamps = []
        hash_prev_block = self.block.hash
        # Create a lot of blocks to the blockchain to calculate median for.
        for i in range(1, consensus.block_median_timestamp_nblocks +1):
            coinbase = CoinbaseTransaction(self.wallet.public_key,
                                           value=100,
                                           block_height=self.coinbase.block_height + i)
            block = Block(hash_prev_block=hash_prev_block,
                          target=self.block.target,
                          transactions=[coinbase])
            # Spread timestamps and grow total_work so each block extends the main branch.
            block.timestamp += i*111
            block.total_work = self.coinbase.block_height + i*10
            hash_prev_block = block.hash
            self.blockchain.add(block)
            timestamps.append(block.timestamp)

        # Add a block to a side-branch to ensure that we only count blocks on the main-branch
        coinbase = CoinbaseTransaction(self.wallet.public_key,
                                       value=100,
                                       block_height=2)
        side_branch_block = Block(hash_prev_block=self.block.hash,
                                  target=self.block.target,
                                  transactions=[coinbase])
        side_branch_block.total_work = 1
        side_branch_block.timestamp += 1000

        self.blockchain.add(side_branch_block)
        true_med = median(timestamps)
        med = self.blockchain.get_median_timestamp(self.blockchain.header)
        self.assertEqual(true_med, med)

    def test_doesnt_add_invalid_block_to_blockchain(self):
        # An unmined block (no valid proof-of-work signature) must be rejected.
        coinbase = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=2
        )
        block = Block(
            hash_prev_block=self.block.hash,
            target=self.block.target,
            transactions=[coinbase]
        )
        self.assertRaises(InvalidBlockException, self.blockchain.add, block, self.validator)

    @unittest.skip
    def test_adds_block_to_blockchain(self):
        coinbase = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=2
        )
        new_block = mine(
            Block(
                hash_prev_block=self.block.hash,
                target=self.block.target,
                transactions=[coinbase],
            )
        )
        self.assertTrue(self.blockchain.add(new_block, self.validator))

    def test_block_is_extending_main_branch(self):
        self.assertEqual(
            self.block.branch_type,
            ExtendingBranchType.EXTENDS_MAIN_BRANCH.value)

    def test_block_is_extending_side_branch_becoming_new_main_branch(self):
        # Build a competing 2-block chain from genesis; its tip should trigger a reorg.
        coinbase = CoinbaseTransaction(
            self.wallet.public_key,
            value=100
        )
        block = mine(
            Block(
                hash_prev_block=self.blockchain.genesis_block.hash,
                target=self.block.target,
                transactions=[coinbase],
                timestamp=int(time.time() + 100)
            )
        )
        coinbase2 = CoinbaseTransaction(
            self.wallet.public_key,
            value=100
        )
        block2 = mine(
            Block(
                hash_prev_block=block.hash,
                target=block.target,
                transactions=[coinbase2],
                timestamp=int(time.time() + 110)
            )
        )
        self.blockchain.add(block)
        self.blockchain.add(block2)

        self.assertEqual(
            block2.branch_type,
            ExtendingBranchType.EXTENDS_SIDE_BRANCH_NEW_MAIN_BRANCH.value
        )

    def test_block_is_extending_side_branch(self):
        # A fork from genesis stays a side branch while the main branch keeps growing.
        coinbase = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=2
        )
        side_block = mine(
            Block(
                hash_prev_block=self.blockchain.genesis_block.hash,
                target=self.block.target,
                transactions=[self.coinbase],
                timestamp=int(time.time() + 100)
            )
        )
        main_block = mine(
            Block(
                hash_prev_block=self.block.hash,
                target=self.block.target,
                transactions=[coinbase],
                timestamp=int(time.time() + 100)
            )
        )

        self.blockchain.add(side_block)
        self.blockchain.add(main_block)
        self.assertEqual(
            side_block.branch_type,
            ExtendingBranchType.EXTENDS_SIDE_BRANCH.value
        )

    def test_reorganize_main_branch(self):
        # Chain A (1a, 2a) extends the current main branch; chain B (1b, 2b)
        # forks from genesis. After reorganizing onto 2b, the A-blocks (and
        # their transactions) must be re-flagged as side branch and vice versa.
        coinbase1a = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=2
        )
        coinbase2a = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=3
        )
        coinbase1b = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=2
        )
        coinbase2b = CoinbaseTransaction(
            self.wallet.public_key,
            value=100,
            block_height=3
        )
        block1a = Block(
            hash_prev_block=self.block.hash,
            target=self.block.target,
            transactions=[coinbase1a],
            timestamp=int(time.time() + 100)
        )
        block2a = Block(
            hash_prev_block=block1a.hash,
            target=self.block.target,
            transactions=[coinbase2a],
            timestamp=int(time.time() + 104)
        )
        block1b = Block(
            hash_prev_block=self.blockchain.genesis_block.hash,
            target=self.block.target,
            transactions=[coinbase1b],
            timestamp=int(time.time() + 101)
        )
        block2b = Block(
            hash_prev_block=block1b.hash,
            target=self.block.target,
            transactions=[coinbase2b],
            timestamp=int(time.time() + 102)
        )

        self.blockchain.add(block1a)
        self.blockchain.add(block2a)
        self.blockchain.add(block1b)
        self.blockchain.add(block2b)

        changed = self.blockchain.reorganize_main_branch(block2b)

        self.assertTrue(changed)
        # Re-fetch blocks so we see the post-reorg branch flags.
        block1a = self.blockchain.block(block1a.hash)
        block1b = self.blockchain.block(block1b.hash)
        block2a = self.blockchain.block(block2a.hash)
        block2b = self.blockchain.block(block2b.hash)

        self.assertEqual(block1a.branch_type, ExtendingBranchType.EXTENDS_SIDE_BRANCH.value)
        self.assertEqual(block1a.transactions[0].branch_type, ExtendingBranchType.EXTENDS_SIDE_BRANCH.value)
        self.assertEqual(block2a.branch_type, ExtendingBranchType.EXTENDS_SIDE_BRANCH.value)
        self.assertEqual(block2a.transactions[0].branch_type, ExtendingBranchType.EXTENDS_SIDE_BRANCH.value)
        self.assertEqual(block1b.branch_type, ExtendingBranchType.EXTENDS_MAIN_BRANCH.value)
        self.assertEqual(block1b.transactions[0].branch_type, ExtendingBranchType.EXTENDS_MAIN_BRANCH.value)
        self.assertEqual(block2b.branch_type, ExtendingBranchType.EXTENDS_MAIN_BRANCH.value)
        self.assertEqual(block2b.transactions[0].branch_type, ExtendingBranchType.EXTENDS_MAIN_BRANCH.value)

    def test_get_utxo_of_tx_hash_at_index(self):
        utxo = self.blockchain.get_utxo_of_tx_hash_at_index(self.block.transactions[0].hash, 0)
        self.assertTrue(utxo.unspent)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
32
tests/test_dsa.py
Normal file
32
tests/test_dsa.py
Normal file
|
@ -0,0 +1,32 @@
|
|||
import unittest
|
||||
|
||||
import logging
|
||||
|
||||
from aucoin import dsa
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestDSA(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.private, self.public = dsa.generate_keypair()
|
||||
|
||||
self.data = b"Hello, world!"
|
||||
self.data_tampered = b"Goodbye, world!"
|
||||
self.signature_tampered = bytes(70) # 0x00...00
|
||||
|
||||
def test_sign_verify(self):
|
||||
signature = dsa.sign(self.private, self.data)
|
||||
self.assertTrue(dsa.verify(self.public, self.data, signature))
|
||||
|
||||
def test_reject_tampered_data(self):
|
||||
signature = dsa.sign(self.private, self.data)
|
||||
self.assertFalse(dsa.verify(self.public, self.data_tampered, signature))
|
||||
|
||||
def test_reject_tampered_signature(self):
|
||||
self.assertFalse(dsa.verify(self.public, self.data, self.signature_tampered))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
43
tests/test_miner.py
Normal file
43
tests/test_miner.py
Normal file
|
@ -0,0 +1,43 @@
|
|||
import logging
|
||||
import unittest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from aucoin.blockchain import Blockchain, Mempool
|
||||
from aucoin.miner import Miner, find_best_transactions
|
||||
from aucoin.transactions import Transaction, Input, Output
|
||||
from aucoin.wallet import Wallet
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestMiner(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.wallet = Wallet()
|
||||
self.blockchain = Blockchain()
|
||||
self.mempool = Mempool()
|
||||
|
||||
# start miner
|
||||
callback_mock = Mock()
|
||||
miner = Miner(self.wallet.address, self.blockchain, self.mempool, callback_mock)
|
||||
|
||||
# wait until callback has been called by the miner (until a block has been found)
|
||||
while not callback_mock.called:
|
||||
pass
|
||||
|
||||
# stop miner
|
||||
miner.stop()
|
||||
|
||||
# the arguments that the callback_mock was last called with:
|
||||
self.found_block = callback_mock.call_args[0][0]
|
||||
|
||||
@unittest.skip
|
||||
def test_mined_blocks_are_valid(self):
|
||||
self.assertTrue(self.found_block.validate()) # TODO
|
||||
|
||||
def test_correct_coinbase_address(self):
|
||||
coinbase_tx = self.found_block.transactions[0]
|
||||
self.assertEqual(coinbase_tx.outputs[0].address, self.wallet.address)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
29
tests/test_network.py
Normal file
29
tests/test_network.py
Normal file
|
@ -0,0 +1,29 @@
|
|||
import json
|
||||
|
||||
import logging
|
||||
from twisted.test import proto_helpers
|
||||
from twisted.trial import unittest
|
||||
|
||||
from aucoin.network import NetworkFactory, MsgType
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
# http://twistedmatrix.com/documents/current/core/howto/trial.html#twisted-specific-testing
|
||||
|
||||
@unittest.SkipTest
|
||||
class TestNetwork(unittest.TestCase):
|
||||
def setUp(self):
|
||||
factory = NetworkFactory()
|
||||
self.protocol = factory.buildProtocol(("127.0.0.1", 0))
|
||||
self.tr = proto_helpers.StringTransport()
|
||||
self.protocol.makeConnection(self.tr)
|
||||
self.tr.clear()
|
||||
|
||||
def test_ping(self):
|
||||
self.protocol.send(MsgType.PING, None)
|
||||
self.assertEqual(decode(self.tr.value()), {"msg_type": "PING", "payload": None})
|
||||
|
||||
|
||||
def decode(data):
|
||||
return json.loads(data)
|
70
tests/test_util.py
Normal file
70
tests/test_util.py
Normal file
|
@ -0,0 +1,70 @@
|
|||
import logging
|
||||
import unittest
|
||||
|
||||
from aucoin import util
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestUtil(unittest.TestCase):
|
||||
def test_hash(self):
|
||||
digest = util.hash(b"Hello, world!")
|
||||
self.assertEqual("315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3", digest.hex())
|
||||
|
||||
def test_merkle_root_example(self):
|
||||
"""
|
||||
This test is based on the example from https://en.bitcoin.it/wiki/Protocol_documentation#Merkle_Trees
|
||||
"""
|
||||
a = b"a"
|
||||
b = b"b"
|
||||
c = b"c"
|
||||
|
||||
# 1st layer
|
||||
d1 = util.hash(a)
|
||||
d2 = util.hash(b)
|
||||
d3 = util.hash(c)
|
||||
d4 = util.hash(c) # odd number of elements, so we take c twice.
|
||||
|
||||
# 2nd layer
|
||||
d5 = util.hash(d1 + d2)
|
||||
d6 = util.hash(d3 + d4)
|
||||
|
||||
# 3rd layer: the root
|
||||
d7 = util.hash(d5 + d6)
|
||||
|
||||
self.assertEqual(d7, util.merkle_root_hash([a, b, c]))
|
||||
|
||||
def test_merkle_root_long(self):
|
||||
a = b"a"
|
||||
b = b"b"
|
||||
c = b"c"
|
||||
d = b"d"
|
||||
e = b"e"
|
||||
f = b"f"
|
||||
|
||||
# 1st layer
|
||||
d1 = util.hash(a)
|
||||
d2 = util.hash(b)
|
||||
d3 = util.hash(c)
|
||||
d4 = util.hash(d)
|
||||
d5 = util.hash(e)
|
||||
d6 = util.hash(f)
|
||||
|
||||
# 2nd layer
|
||||
d7 = util.hash(d1 + d2)
|
||||
d8 = util.hash(d3 + d4)
|
||||
d9 = util.hash(d5 + d6)
|
||||
d10 = d9 # odd number of elements, so we take d9 twice.
|
||||
|
||||
# 3rd layer
|
||||
d11 = util.hash(d7 + d8)
|
||||
d12 = util.hash(d9 + d10)
|
||||
|
||||
# 4th layer: the root
|
||||
d13 = util.hash(d11 + d12)
|
||||
|
||||
self.assertEqual(d13, util.merkle_root_hash([a, b, c, d, e, f]))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
214
tests/test_validator_SyntaxChecker.py
Normal file
214
tests/test_validator_SyntaxChecker.py
Normal file
|
@ -0,0 +1,214 @@
|
|||
import logging
|
||||
import unittest
|
||||
|
||||
from aucoin import consensus, dsa, util
|
||||
from aucoin.block import Block
|
||||
from aucoin.exceptions import InvalidTransactionException, InvalidBlockException
|
||||
from aucoin.transactions import Transaction, Input, Output, CoinbaseTransaction
|
||||
from aucoin.validation import SyntaxChecker
|
||||
from aucoin.wallet import public_bytes
|
||||
from tests import helpers
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestCheckTransaction(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# A valid transaction used for testing.
|
||||
self.transaction = Transaction(
|
||||
inputs=[
|
||||
Input(
|
||||
prev_tx_hash=b"hash1",
|
||||
txout_index=2
|
||||
),
|
||||
Input(
|
||||
prev_tx_hash=b"hash2",
|
||||
txout_index=0
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
Output(
|
||||
value=50,
|
||||
address=b"addr1"
|
||||
),
|
||||
Output(
|
||||
value=150,
|
||||
address=b"addr2"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
def test_valid(self):
|
||||
SyntaxChecker.check_transaction(self.transaction)
|
||||
|
||||
def test_reject_empty_input_list(self):
|
||||
self.transaction.inputs = []
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Input list is empty",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_empty_output_list(self):
|
||||
self.transaction.outputs = []
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Output list is empty",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_zero_output_value(self):
|
||||
self.transaction.outputs[1].value = 0
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Negative or zero output value for one or more outputs",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_size_too_large(self):
|
||||
self.transaction.outputs[1].address = bytes(consensus.block_max_size + 1)
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction-size larger than max block size",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_negative_output_value(self):
|
||||
self.transaction.outputs[1].value = -10
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Negative or zero output value for one or more outputs",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_duplicate_inputs(self):
|
||||
self.transaction.inputs.append(self.transaction.inputs[1])
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction contains duplicate inputs",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_prev_hash_equals_zero(self):
|
||||
self.transaction.inputs[1].prev_tx_hash = bytes(32)
|
||||
self.assertRaisesRegex(InvalidTransactionException,
|
||||
"prev_tx_hash is 0x00...00 for one or more inputs in non-coinbase tx",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
|
||||
class TestCheckCoinbaseTransaction(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# A valid CoinbaseTransaction used for testing.
|
||||
self.transaction = CoinbaseTransaction(
|
||||
address=b"addr1",
|
||||
block_height=1,
|
||||
coinbase=b"coinbasedata"
|
||||
)
|
||||
|
||||
def test_valid(self):
|
||||
SyntaxChecker.check_transaction(self.transaction)
|
||||
|
||||
def test_reject_multiple_inputs(self):
|
||||
self.transaction.inputs.append(Input(b"prev_tx_hash", 10))
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Coinbase transactions may only have one input",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_multiple_outputs(self):
|
||||
self.transaction.outputs.append(Output(100, b"address"))
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Coinbase transactions may only have one output",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_prev_hash_nonzero(self):
|
||||
self.transaction.inputs[0].prev_tx_hash = b"not zero"
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Coinbase transaction must have prev_tx_hash = 0x00...00",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_txout_index_nonzero(self):
|
||||
self.transaction.inputs[0].txout_index = 1
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Coinbase transaction must have txout_index = 0",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
def test_reject_coinbase_too_large(self):
|
||||
self.transaction.inputs[0].coinbase = bytes(consensus.tx_coinbase_max_size + 1)
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Size of coinbase parameter exceeds coinbase_max_size",
|
||||
SyntaxChecker.check_transaction, self.transaction)
|
||||
|
||||
|
||||
class TestCheckBlock(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# A valid block used for testing.
|
||||
self.private_key, self.public_key = dsa.generate_keypair()
|
||||
block = Block(
|
||||
target=bytes.fromhex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
|
||||
public_key=public_bytes(self.public_key),
|
||||
transactions=[
|
||||
CoinbaseTransaction(
|
||||
address=util.address(public_bytes(self.public_key)),
|
||||
block_height=1,
|
||||
coinbase=b"coinbasedata"
|
||||
),
|
||||
Transaction(
|
||||
inputs=[
|
||||
Input(
|
||||
prev_tx_hash=b"hash1",
|
||||
txout_index=2
|
||||
)
|
||||
],
|
||||
outputs=[
|
||||
Output(
|
||||
value=50,
|
||||
address=b"addr1"
|
||||
)
|
||||
]
|
||||
)
|
||||
]
|
||||
)
|
||||
self.block = helpers.mine(block, self.private_key) # calculate correct signature
|
||||
|
||||
def test_valid(self):
|
||||
SyntaxChecker.check_block(self.block)
|
||||
|
||||
def test_reject_unsatisfactory_hash(self):
|
||||
self.block.target = bytes(32)
|
||||
self.assertRaisesRegex(InvalidBlockException, "Block hash doesn't satisfy claimed target proof of work",
|
||||
SyntaxChecker.check_block, self.block)
|
||||
|
||||
def test_reject_empty_transaction_list(self):
|
||||
self.block.transactions = []
|
||||
self.assertRaisesRegex(InvalidBlockException, "Transaction list must be non-empty",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_wrong_merkle(self):
|
||||
self.block.transactions.pop()
|
||||
self.assertRaisesRegex(InvalidBlockException, "Incorrect Merkle root hash",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_size_too_large(self):
|
||||
self.block.transactions[0].inputs[0].coinbase = bytes(consensus.block_max_size + 1)
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidBlockException, "Block-size larger than max block size",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_first_transaction_is_not_coinbase(self):
|
||||
del self.block.transactions[0]
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidBlockException, "The first transaction must be coinbase",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_non_first_transaction_is_coinbase(self):
|
||||
self.block.transactions.append(self.block.transactions[0])
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidBlockException, "Only the first transaction may be coinbase",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_invalid_signature(self):
|
||||
# A small "mining algorithm" that changes the block's hash by changing the signature (like normally in sign to
|
||||
# mine), but this signature is always invalid to provoke exception.
|
||||
nonce = 0
|
||||
while True:
|
||||
self.block.signature = nonce.to_bytes(8, "big")
|
||||
if self.block.hash <= self.block.target:
|
||||
break
|
||||
nonce += 1
|
||||
|
||||
self.assertRaisesRegex(InvalidBlockException, "Invalid block signature",
|
||||
SyntaxChecker.check_block, self.block)
|
||||
|
||||
def test_reject_wrong_public_key(self):
|
||||
self.block.transactions[0].outputs[0].address = b"Wrong"
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidBlockException, "Public key doesn't match output address of the coinbase",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_any_invalid_transaction(self):
|
||||
self.block.transactions[1].inputs = []
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Input list is empty",
|
||||
SyntaxChecker.check_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
92
tests/test_validator_block.py
Normal file
92
tests/test_validator_block.py
Normal file
|
@ -0,0 +1,92 @@
|
|||
import logging
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from freezegun import freeze_time
|
||||
|
||||
from aucoin import consensus, dsa, util
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.exceptions import InvalidBlockException, OrphanException
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.network import Network
|
||||
from aucoin.transactions import CoinbaseTransaction
|
||||
from aucoin.validation import Validator
|
||||
from aucoin.wallet import public_bytes
|
||||
from tests import helpers
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
easy_target = bytes.fromhex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
|
||||
|
||||
@patch("aucoin.consensus.required_target", MagicMock(return_value=easy_target))
|
||||
class TestAddBlock(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# setup a fresh blockchain and mempool
|
||||
self.blockchain = Blockchain(reset=True)
|
||||
self.mempool = Mempool()
|
||||
self.network = Network(self.blockchain, self.mempool, max_peers=0)
|
||||
|
||||
# A valid block used for testing.
|
||||
with session_scope() as session:
|
||||
self.private_key, self.public_key = dsa.generate_keypair()
|
||||
self.block = Block(
|
||||
target=easy_target,
|
||||
hash_prev_block=self.blockchain.genesis_block(session).hash,
|
||||
public_key=public_bytes(self.public_key),
|
||||
transactions=[
|
||||
CoinbaseTransaction(
|
||||
address=util.address(public_bytes(self.public_key)),
|
||||
block_height=1
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
self.validator = Validator(helpers.Core(), self.blockchain, self.mempool, self.network)
|
||||
|
||||
def test_valid(self):
|
||||
self.validator.add_block(helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_invalid_syntax(self):
|
||||
self.block.transactions = []
|
||||
self.assertRaisesRegex(InvalidBlockException, "Transaction list must be non-empty",
|
||||
self.validator.add_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_duplicate(self):
|
||||
self.block = helpers.mine(self.block, self.private_key)
|
||||
self.validator.add_block(self.block)
|
||||
self.assertRaisesRegex(InvalidBlockException, "Already exists in blockchain",
|
||||
self.validator.add_block, self.block)
|
||||
|
||||
def test_reject_orphan(self):
|
||||
self.block.hash_prev_block = b"non existent"
|
||||
|
||||
with self.assertRaises(OrphanException) as cm:
|
||||
self.validator.add_block(helpers.mine(self.block, self.private_key))
|
||||
self.assertEqual(cm.exception.missing, b'non existent')
|
||||
|
||||
def test_reject_target_difficulty_rules_mismatch(self):
|
||||
self.block.target = bytes.fromhex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
self.assertRaisesRegex(InvalidBlockException, "Target does not match the difficulty rules",
|
||||
self.validator.add_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
@freeze_time("2010-01-01")
|
||||
def test_reject_future_timestamp(self):
|
||||
self.block.timestamp = int((datetime(2010, 1, 1, 0, 0, 1) + consensus.block_max_future_time).timestamp())
|
||||
self.assertRaisesRegex(InvalidBlockException, "Block timestamp must not be more than block_max_future_time in the future",
|
||||
self.validator.add_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_old_timestamp(self):
|
||||
with session_scope() as session:
|
||||
self.block.timestamp = self.blockchain.genesis_block(session).timestamp
|
||||
self.assertRaisesRegex(InvalidBlockException, "Timestamp is before or equal to median time of the last n blocks",
|
||||
self.validator.add_block, helpers.mine(self.block, self.private_key))
|
||||
|
||||
def test_reject_incorrect_block_height(self):
|
||||
self.block.transactions[0].inputs[0].block_height = 3
|
||||
self.block.calculate_merkle()
|
||||
self.assertRaisesRegex(InvalidBlockException, "Block height is not equal to previous block's height \+ 1",
|
||||
self.validator.add_block, helpers.mine(self.block, self.private_key))
|
198
tests/test_validator_transaction.py
Normal file
198
tests/test_validator_transaction.py
Normal file
|
@ -0,0 +1,198 @@
|
|||
import logging
|
||||
import unittest
|
||||
from unittest.mock import patch
|
||||
|
||||
from aucoin import dsa, util
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.exceptions import InvalidTransactionException, OrphanException
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.network import Network
|
||||
from aucoin.transactions import Transaction, Input, Output, CoinbaseTransaction
|
||||
from aucoin.validation import Validator
|
||||
from aucoin.wallet import Wallet, public_bytes
|
||||
from tests import helpers
|
||||
from tests.helpers import mine
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestAddTransaction(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.blockchain = Blockchain(reset=True)
|
||||
self.mempool = Mempool()
|
||||
self.wallet = Wallet(self.blockchain, self.mempool)
|
||||
self.network = Network(self.blockchain, self.mempool, max_peers=0)
|
||||
self.validator = Validator(helpers.Core(), self.blockchain, self.mempool, self.network)
|
||||
|
||||
# add a block with an unspent transaction output we can reference
|
||||
with session_scope() as session:
|
||||
genesis = self.blockchain.genesis_block(session)
|
||||
address = self.wallet.new_address()
|
||||
self.private_key, self.public_key = self.wallet.keys[address]
|
||||
self.block = Block(
|
||||
hash_prev_block=genesis.hash,
|
||||
target=genesis.target,
|
||||
public_key=public_bytes(self.public_key),
|
||||
transactions=[
|
||||
CoinbaseTransaction(
|
||||
address=address,
|
||||
value=100,
|
||||
block_height=1
|
||||
)
|
||||
]
|
||||
)
|
||||
self.validator.add_block(mine(self.block, self.private_key))
|
||||
|
||||
# the transaction which we will be modifying to provoke validation exceptions
|
||||
self.transaction = Transaction(
|
||||
inputs=[
|
||||
Input(
|
||||
prev_tx_hash=self.block.transactions[0].hash,
|
||||
txout_index=0
|
||||
)
|
||||
],
|
||||
outputs=[
|
||||
Output(
|
||||
value=50,
|
||||
address=b"some_address"
|
||||
)
|
||||
]
|
||||
)
|
||||
self.wallet.sign(self.transaction, session)
|
||||
|
||||
def test_valid(self):
|
||||
self.validator.add_transaction(self.transaction)
|
||||
|
||||
def test_reject_invalid_syntax(self):
|
||||
self.transaction.inputs = []
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Input list is empty",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_coinbase_transaction(self):
|
||||
coinbase_transaction = CoinbaseTransaction(b"some_address")
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Standalone transaction cannot be coinbase",
|
||||
self.validator.add_transaction, coinbase_transaction)
|
||||
|
||||
def test_reject_duplicate_in_blockchain(self):
|
||||
with session_scope() as session:
|
||||
block = Block(
|
||||
hash_prev_block=self.block.hash,
|
||||
target=self.block.target,
|
||||
transactions=[
|
||||
CoinbaseTransaction(
|
||||
address=self.wallet.new_address(),
|
||||
value=100
|
||||
),
|
||||
self.transaction
|
||||
]
|
||||
)
|
||||
self.blockchain.add(block, session, main_branch=True)
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction already exists in blockchain's main branch",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_duplicate_mempool(self):
|
||||
self.mempool[self.transaction.hash] = self.transaction
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction already exists in mempool",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_mempool_conflict(self):
|
||||
# a transaction that spends the same input as self.transaction:
|
||||
transaction = Transaction(
|
||||
inputs=[
|
||||
Input(
|
||||
prev_tx_hash=self.block.transactions[0].hash,
|
||||
txout_index=0
|
||||
)
|
||||
],
|
||||
outputs=[
|
||||
Output(
|
||||
value=100,
|
||||
address=b"another_address"
|
||||
)
|
||||
]
|
||||
)
|
||||
self.assertNotEqual(transaction.hash, self.transaction.hash)
|
||||
self.assertEqual((self.transaction.inputs[0].prev_tx_hash, self.transaction.inputs[0].txout_index),
|
||||
(transaction.inputs[0].prev_tx_hash, transaction.inputs[0].txout_index))
|
||||
self.mempool[transaction.hash] = transaction
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction conflicts with mempool: one or more input's referenced output is spent by another transaction in the mempool",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_orphan(self):
|
||||
input = Input(
|
||||
prev_tx_hash=b"nonexistent_tx",
|
||||
txout_index=123
|
||||
)
|
||||
self.transaction.inputs.append(input)
|
||||
self.assertRaises(OrphanException, self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_spent(self):
|
||||
address = self.wallet.new_address()
|
||||
private_key, public_key = self.wallet.keys[address]
|
||||
block = Block(
|
||||
hash_prev_block=self.block.hash,
|
||||
target=self.block.target,
|
||||
public_key=public_bytes(public_key),
|
||||
transactions=[
|
||||
CoinbaseTransaction(
|
||||
address=address,
|
||||
value=100,
|
||||
block_height=2
|
||||
)
|
||||
] + [self.transaction]
|
||||
)
|
||||
self.validator.add_block(mine(block, private_key))
|
||||
|
||||
# a transaction that spends the same input as self.transaction:
|
||||
transaction = Transaction(
|
||||
inputs=[
|
||||
Input(
|
||||
prev_tx_hash=self.block.transactions[0].hash,
|
||||
txout_index=0
|
||||
)
|
||||
],
|
||||
outputs=[
|
||||
Output(
|
||||
value=20,
|
||||
address=b"other_address"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Referenced output already spent \(not in blockchain/mempool UTXO\) for one or more inputs",
|
||||
self.validator.add_transaction, transaction)
|
||||
|
||||
def test_reject_negative_fee(self):
|
||||
output = Output(
|
||||
value=10000,
|
||||
address=b"doesnt_matter"
|
||||
)
|
||||
self.transaction.outputs.append(output)
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Sum of input values < sum of output values \(negative fee\)",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_too_low_fee(self):
|
||||
with patch("aucoin.consensus.tx_min_fee", 51): # fee of self.transaction is 50
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Transaction fee too low",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_invalid_signature(self):
|
||||
self.transaction.inputs[0].signature = b"Wrong"
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Invalid signature for one or more inputs",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
def test_reject_wrong_public_key(self):
|
||||
with session_scope() as session:
|
||||
for input, copy_hash in self.transaction.truncated_copies(self.blockchain, self.mempool, session):
|
||||
keypair = dsa.generate_keypair() # generate new, wrong, keypair
|
||||
input.public_key = public_bytes(keypair.public)
|
||||
input.signature = dsa.sign(keypair.private, copy_hash)
|
||||
|
||||
self.assertRaisesRegex(InvalidTransactionException, "Public key doesn't match address of the output it is spending for one or more inputs",
|
||||
self.validator.add_transaction, self.transaction)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
30
tests/test_wallet.py
Normal file
30
tests/test_wallet.py
Normal file
|
@ -0,0 +1,30 @@
|
|||
import logging
|
||||
import unittest
|
||||
|
||||
from aucoin import dsa
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.wallet import Wallet
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
class TestWallet(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.wallet = Wallet(Blockchain())
|
||||
self.data = b"Hello, world!"
|
||||
|
||||
def test_sign(self):
|
||||
signature = self.wallet.sign(self.data)
|
||||
self.assertTrue(dsa.verify(self.wallet._public_key, self.data, signature))
|
||||
|
||||
def test_save_load(self):
|
||||
public_key_old = self.wallet.public_key
|
||||
self.wallet.save()
|
||||
self.wallet._private_key = None
|
||||
self.wallet._public_key = None
|
||||
self.wallet.load()
|
||||
self.assertEqual(self.wallet.public_key, public_key_old)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
173
tools/find_best_tx_alg_test.py
Normal file
173
tools/find_best_tx_alg_test.py
Normal file
|
@ -0,0 +1,173 @@
|
|||
import time
|
||||
from random import randint, random
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.mempool import Mempool
|
||||
from aucoin.miner import find_best_transactions
|
||||
|
||||
|
||||
class Transaction(object):
|
||||
|
||||
_fee = 1
|
||||
_hash = b""
|
||||
prev_tx_hash = b"sdfasfasdfasdfasdf"
|
||||
txout_index = 0
|
||||
signature = b""
|
||||
public_key = b""
|
||||
_size = 12
|
||||
|
||||
@property
|
||||
def hash(self):
|
||||
return self._hash
|
||||
|
||||
def __bytes__(self):
|
||||
return self.prev_tx_hash + \
|
||||
self.txout_index.to_bytes(4, "big") + \
|
||||
self.signature + \
|
||||
self.public_key
|
||||
|
||||
@property
|
||||
def inputs(self):
|
||||
return [self]
|
||||
|
||||
@property
|
||||
def size(self) -> int:
|
||||
"""
|
||||
:return: Size of transaction in bytes.
|
||||
"""
|
||||
return self._size
|
||||
|
||||
def fee(self, bc=None, mem=None, session=None) -> int:
|
||||
return self._fee
|
||||
|
||||
def __str__(self, *args, **kwargs):
|
||||
return f"Hash: {self._hash.hex()}"
|
||||
|
||||
|
||||
def generate_transactions(number: int, pct=70):
|
||||
txs = []
|
||||
for i in range(number):
|
||||
tx = Transaction()
|
||||
tx._hash = i.to_bytes(12, "big")
|
||||
tx._size = randint(30, 60)
|
||||
tx._fee = randint(10, 30 ) + tx._size/10
|
||||
if random() < pct/100 and len(txs) > 0:
|
||||
tx.prev_tx_hash = txs[randint(0, len(txs)-1)].hash
|
||||
## tx._fee += randint(0, 100)
|
||||
pass
|
||||
else:
|
||||
tx.prev_tx_hash = b"sdfasfasdfasdfasdf"
|
||||
|
||||
|
||||
txs.append(tx)
|
||||
return txs
|
||||
|
||||
def simple_alg(txs, max_size):
|
||||
size = 0
|
||||
total = 0
|
||||
best = []
|
||||
sorted_txs = sorted(txs, key=lambda tx: tx._fee / tx.size, reverse=True)
|
||||
for tx in sorted_txs:
|
||||
if mempool.transaction(tx.prev_tx_hash) is None:
|
||||
if size + tx.size < max_size:
|
||||
size += tx.size
|
||||
total += tx._fee
|
||||
best.append(tx)
|
||||
return best, total
|
||||
|
||||
sums_best = [[],[],[],[]]
|
||||
sums = [[],[],[],[]]
|
||||
|
||||
max_size = 2048*2
|
||||
"""
|
||||
start = 0
|
||||
end = 1000
|
||||
times_best = []
|
||||
times_simple = []
|
||||
for j in [0, 1, 2, 3]:
|
||||
transactions = generate_transactions(end, j*20)
|
||||
for i in range(start, end):
|
||||
mempool = Mempool()
|
||||
blockchain = Blockchain()
|
||||
|
||||
txs = transactions[0:i]
|
||||
for tx in txs:
|
||||
mempool[tx.hash] = tx
|
||||
|
||||
# log time and run alg.
|
||||
t = time.time()
|
||||
best, total = find_best_transactions(blockchain, mempool, max_size, None)
|
||||
times_best.append(time.time() - t)
|
||||
|
||||
# log time and run simple alg.
|
||||
t = time.time()
|
||||
simple_b, simple_total = simple_alg(txs, max_size)
|
||||
times_simple.append(time.time() - t)
|
||||
|
||||
sums_best[j].append(total)
|
||||
sums[j].append(simple_total)
|
||||
|
||||
|
||||
print(mean(sums_best[0]), mean(sums[0]))
|
||||
|
||||
print(f"Alg: {sum(times_best)}")
|
||||
print(f"Simple: {sum(times_simple)}")
|
||||
|
||||
x = range(start, end)
|
||||
|
||||
plt.figure(1)
|
||||
plt.subplot(141)
|
||||
|
||||
plt.plot(x, np.array(sums_best[0]), 'g', x, sums[0], 'r--')
|
||||
plt.xlabel('transactions in mempool')
|
||||
plt.ylabel('Total fee')
|
||||
plt.title('0% dependent')
|
||||
|
||||
plt.subplot(142)
|
||||
plt.plot(x, np.array(sums_best[1]), 'g', x, sums[1], 'r--')
|
||||
plt.title('20% dependent')
|
||||
plt.xlabel('transactions in mempool')
|
||||
|
||||
plt.subplot(143)
|
||||
plt.plot(x, np.array(sums_best[2]), 'g', x, sums[2], 'r--')
|
||||
plt.title('40% dependent')
|
||||
plt.xlabel('transactions in mempool')
|
||||
|
||||
plt.subplot(144)
|
||||
plt.plot(x, np.array(sums_best[3]), 'g', x, sums[3], 'r--')
|
||||
plt.title('60% dependent')
|
||||
plt.xlabel('transactions in mempool')
|
||||
|
||||
plt.show()
|
||||
"""
|
||||
times = []
|
||||
times_simple = []
|
||||
|
||||
for i in range(0, 100):
|
||||
mempool = Mempool()
|
||||
blockchain = Blockchain()
|
||||
txs = generate_transactions(1000, i)
|
||||
for tx in txs:
|
||||
mempool[tx.hash] = tx
|
||||
|
||||
t = time.time()
|
||||
best, total = find_best_transactions(blockchain, mempool, max_size, None)
|
||||
times.append(time.time() - t)
|
||||
t = time.time()
|
||||
simple_b, simple_total = simple_alg(txs, max_size)
|
||||
times_simple.append(time.time() - t)
|
||||
|
||||
print(f"Extended: {sum(times)}")
|
||||
print(f"Simple: {sum(times_simple)}")
|
||||
|
||||
x = range(0, 100)
|
||||
plt.figure(2)
|
||||
plt.plot(x, np.array(times)*1000, 'g', x, np.array(times_simple)*1000, 'r--')
|
||||
plt.title('Runningtime of extended (green) compared to simple (red) algorithm')
|
||||
plt.xlabel('% of dependent transactions (of 1000 in total)')
|
||||
plt.ylabel('milliseconds')
|
||||
|
||||
plt.show()
|
236
tools/network-visualization/data.json
Normal file
236
tools/network-visualization/data.json
Normal file
|
@ -0,0 +1,236 @@
|
|||
{
|
||||
"127.0.0.1": [
|
||||
"127.0.0.7",
|
||||
"127.0.0.3",
|
||||
"127.0.0.5",
|
||||
"127.0.0.2",
|
||||
"127.0.0.10",
|
||||
"127.0.0.11",
|
||||
"127.0.0.15",
|
||||
"127.0.0.19",
|
||||
"127.0.0.18",
|
||||
"127.0.0.13"
|
||||
],
|
||||
"127.0.0.10": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.11",
|
||||
"127.0.0.19",
|
||||
"127.0.0.3",
|
||||
"127.0.0.21",
|
||||
"127.0.0.9",
|
||||
"127.0.0.14",
|
||||
"127.0.0.6",
|
||||
"127.0.0.13"
|
||||
],
|
||||
"127.0.0.11": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.10",
|
||||
"127.0.0.2",
|
||||
"127.0.0.7",
|
||||
"127.0.0.3",
|
||||
"127.0.0.15",
|
||||
"127.0.0.17",
|
||||
"127.0.0.20",
|
||||
"127.0.0.13",
|
||||
"127.0.0.16"
|
||||
],
|
||||
"127.0.0.12": [
|
||||
"127.0.0.20",
|
||||
"127.0.0.4",
|
||||
"127.0.0.15",
|
||||
"127.0.0.16"
|
||||
],
|
||||
"127.0.0.13": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.21",
|
||||
"127.0.0.7",
|
||||
"127.0.0.14",
|
||||
"127.0.0.18",
|
||||
"127.0.0.6",
|
||||
"127.0.0.17",
|
||||
"127.0.0.11",
|
||||
"127.0.0.3",
|
||||
"127.0.0.10"
|
||||
],
|
||||
"127.0.0.14": [
|
||||
"127.0.0.21",
|
||||
"127.0.0.4",
|
||||
"127.0.0.9",
|
||||
"127.0.0.6",
|
||||
"127.0.0.13",
|
||||
"127.0.0.19",
|
||||
"127.0.0.18",
|
||||
"127.0.0.17",
|
||||
"127.0.0.2",
|
||||
"127.0.0.10"
|
||||
],
|
||||
"127.0.0.15": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.16",
|
||||
"127.0.0.3",
|
||||
"127.0.0.5",
|
||||
"127.0.0.11",
|
||||
"127.0.0.17",
|
||||
"127.0.0.7",
|
||||
"127.0.0.12",
|
||||
"127.0.0.20",
|
||||
"127.0.0.6"
|
||||
],
|
||||
"127.0.0.16": [
|
||||
"127.0.0.7",
|
||||
"127.0.0.15",
|
||||
"127.0.0.20",
|
||||
"127.0.0.21",
|
||||
"127.0.0.3",
|
||||
"127.0.0.11",
|
||||
"127.0.0.8",
|
||||
"127.0.0.12"
|
||||
],
|
||||
"127.0.0.17": [
|
||||
"127.0.0.6",
|
||||
"127.0.0.18",
|
||||
"127.0.0.14",
|
||||
"127.0.0.20",
|
||||
"127.0.0.13",
|
||||
"127.0.0.11",
|
||||
"127.0.0.15",
|
||||
"127.0.0.8",
|
||||
"127.0.0.5",
|
||||
"127.0.0.21"
|
||||
],
|
||||
"127.0.0.18": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.6",
|
||||
"127.0.0.13",
|
||||
"127.0.0.14",
|
||||
"127.0.0.19",
|
||||
"127.0.0.17",
|
||||
"127.0.0.9",
|
||||
"127.0.0.21",
|
||||
"127.0.0.4"
|
||||
],
|
||||
"127.0.0.19": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.10",
|
||||
"127.0.0.14",
|
||||
"127.0.0.9",
|
||||
"127.0.0.18",
|
||||
"127.0.0.5",
|
||||
"127.0.0.5",
|
||||
"127.0.0.4",
|
||||
"127.0.0.8",
|
||||
"127.0.0.3"
|
||||
],
|
||||
"127.0.0.2": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.11",
|
||||
"127.0.0.21",
|
||||
"127.0.0.7",
|
||||
"127.0.0.7",
|
||||
"127.0.0.5",
|
||||
"127.0.0.9",
|
||||
"127.0.0.14",
|
||||
"127.0.0.9"
|
||||
],
|
||||
"127.0.0.20": [
|
||||
"127.0.0.16",
|
||||
"127.0.0.8",
|
||||
"127.0.0.17",
|
||||
"127.0.0.9",
|
||||
"127.0.0.6",
|
||||
"127.0.0.11",
|
||||
"127.0.0.4",
|
||||
"127.0.0.12",
|
||||
"127.0.0.15"
|
||||
],
|
||||
"127.0.0.21": [
|
||||
"127.0.0.14",
|
||||
"127.0.0.13",
|
||||
"127.0.0.9",
|
||||
"127.0.0.2",
|
||||
"127.0.0.4",
|
||||
"127.0.0.10",
|
||||
"127.0.0.18",
|
||||
"127.0.0.16",
|
||||
"127.0.0.17"
|
||||
],
|
||||
"127.0.0.3": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.10",
|
||||
"127.0.0.11",
|
||||
"127.0.0.15",
|
||||
"127.0.0.7",
|
||||
"127.0.0.5",
|
||||
"127.0.0.16",
|
||||
"127.0.0.4",
|
||||
"127.0.0.13",
|
||||
"127.0.0.19"
|
||||
],
|
||||
"127.0.0.4": [
|
||||
"127.0.0.14",
|
||||
"127.0.0.21",
|
||||
"127.0.0.9",
|
||||
"127.0.0.19",
|
||||
"127.0.0.18",
|
||||
"127.0.0.3",
|
||||
"127.0.0.20",
|
||||
"127.0.0.12",
|
||||
"127.0.0.5",
|
||||
"127.0.0.6"
|
||||
],
|
||||
"127.0.0.5": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.15",
|
||||
"127.0.0.7",
|
||||
"127.0.0.8",
|
||||
"127.0.0.3",
|
||||
"127.0.0.2",
|
||||
"127.0.0.19",
|
||||
"127.0.0.19",
|
||||
"127.0.0.4",
|
||||
"127.0.0.17"
|
||||
],
|
||||
"127.0.0.6": [
|
||||
"127.0.0.18",
|
||||
"127.0.0.14",
|
||||
"127.0.0.17",
|
||||
"127.0.0.9",
|
||||
"127.0.0.13",
|
||||
"127.0.0.20",
|
||||
"127.0.0.8",
|
||||
"127.0.0.10",
|
||||
"127.0.0.4",
|
||||
"127.0.0.15"
|
||||
],
|
||||
"127.0.0.7": [
|
||||
"127.0.0.1",
|
||||
"127.0.0.16",
|
||||
"127.0.0.13",
|
||||
"127.0.0.11",
|
||||
"127.0.0.3",
|
||||
"127.0.0.2",
|
||||
"127.0.0.2",
|
||||
"127.0.0.5",
|
||||
"127.0.0.15"
|
||||
],
|
||||
"127.0.0.8": [
|
||||
"127.0.0.5",
|
||||
"127.0.0.20",
|
||||
"127.0.0.6",
|
||||
"127.0.0.17",
|
||||
"127.0.0.16",
|
||||
"127.0.0.19"
|
||||
],
|
||||
"127.0.0.9": [
|
||||
"127.0.0.14",
|
||||
"127.0.0.21",
|
||||
"127.0.0.19",
|
||||
"127.0.0.6",
|
||||
"127.0.0.20",
|
||||
"127.0.0.4",
|
||||
"127.0.0.18",
|
||||
"127.0.0.2",
|
||||
"127.0.0.10",
|
||||
"127.0.0.2"
|
||||
]
|
||||
}
|
28
tools/network-visualization/index.html
Normal file
28
tools/network-visualization/index.html
Normal file
|
@ -0,0 +1,28 @@
|
|||
<!doctype html>
|
||||
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Network visualization</title>
|
||||
<script src="https://d3js.org/d3.v5.min.js"></script>
|
||||
<style>
|
||||
canvas {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
z-index: -1;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<canvas></canvas>
|
||||
<div>
|
||||
<label for="select-json">Select JSON</label>
|
||||
<input type="file" name="select-json" id="select-json">
|
||||
<label for="text-json">Paste JSON</label>
|
||||
<input type="text" name="text-json" id="text-json">
|
||||
<p id="text"></p>
|
||||
</div>
|
||||
<script src="script.js"></script>
|
||||
</body>
|
||||
</html>
|
166
tools/network-visualization/script.js
Normal file
166
tools/network-visualization/script.js
Normal file
|
@ -0,0 +1,166 @@
|
|||
|
||||
|
||||
// DOM handles and shared d3 state for the network visualization.
let input = document.getElementById('select-json');
let textInput = document.getElementById('text-json');
let text = document.getElementById('text');
let simulation;   // d3 force simulation, created in render()
let links;        // link objects built from the adjacency JSON
let nodes;        // node objects built from the adjacency JSON

// Parse JSON pasted into the text input and (re)render the graph.
textInput.onchange = function change() {
    // BUG FIX: `json` was assigned without declaration, creating an
    // implicit global (a ReferenceError in strict mode). Declare it locally.
    let json;
    try {
        json = JSON.parse(this.value);
        nodes = buildNodes(json);
        links = buildLinks(json);
    } catch (e) {
        alert(e.message);
        return;
    }
    render(json);
}
|
||||
|
||||
// Read the selected file, parse it as adjacency JSON and (re)render.
input.onchange = function change() {

    var reader = new FileReader();
    reader.onload = () => {
        // BUG FIX: `json` was an undeclared implicit global, and the local
        // `text` shadowed the global <p id="text"> element; both removed.
        let json;
        try {
            json = JSON.parse(reader.result);
            nodes = buildNodes(json);
            links = buildLinks(json);
        } catch (e) {
            alert(e.message);
            return;
        }
        render(json);
    }
    reader.readAsText(this.files[0]);
};
|
||||
|
||||
// Turn the adjacency object's keys (IP addresses) into d3 node objects.
function buildNodes(json) {
    const result = [];
    for (const ip of Object.keys(json)) {
        result.push({ id: ip, group: 1 });
    }
    return result;
}
|
||||
|
||||
// Build d3 link objects {source, target} from the adjacency JSON.
// A link is only emitted when the target itself appears as a key,
// so edges pointing at unknown peers are dropped.
function buildLinks(json) {
    let keys = Object.keys(json);
    // NOTE: a `lengths` map was previously built here but never read;
    // removed as dead code.
    return keys.reduce((acc, key) => {
        let list = [];
        json[key].forEach(t => {
            if (keys.indexOf(t) > -1) {
                list.push({ source: key, target: t });
            }
        });
        return acc.concat(list);
    }, []);
}
|
||||
|
||||
// Render the peer graph onto the canvas using a d3 force simulation.
// Reads the module-level `nodes` and `links` (built by buildNodes/buildLinks);
// the `json` parameter is passed in but not used directly here.
function render(json) {
    let n = nodes.length;
    text.innerText = `Total of ${n} nodes`;
    let canvas = document.querySelector("canvas");

    let width = window.innerWidth,
        height = window.innerHeight;

    context = canvas.getContext("2d");
    // Scale the backing store by devicePixelRatio so HiDPI screens render crisply.
    d3.select(canvas)
        .attr('width', width * window.devicePixelRatio)
        .attr('height', height * window.devicePixelRatio)
        .style('width', width + 'px')
        .style('height', height + 'px');
    context.scale(window.devicePixelRatio, window.devicePixelRatio);

    // Force layout: springs along links, mutual repulsion, centered in viewport.
    simulation = d3.forceSimulation()
        .force('link', d3.forceLink().id(d => d.id).distance(300))
        .force('charge', d3.forceManyBody())
        .force('center', d3.forceCenter(width / 2, height / 2));

    simulation.nodes(nodes)
        .on('tick', ticked);

    simulation.force('link')
        .links(links);

    // Mouse dragging of nodes (d3.event-based API, d3 v5).
    d3.select(canvas)
        .call(d3.drag()
            .container(canvas)
            .subject(dragsubject)
            .on('start', dragstarted)
            .on('drag', dragged)
            .on('end', dragended));

    // Redraw the whole canvas on every simulation tick.
    function ticked() {
        context.clearRect(0, 0, width, height);

        links.forEach(drawLink);
        nodes.forEach(drawNode);
    }

    // The node nearest the pointer becomes the drag subject.
    function dragsubject() {
        return simulation.find(d3.event.x, d3.event.y);
    }
}
|
||||
|
||||
// Drag start: reheat the simulation and pin the node to the pointer.
function dragstarted() {
    if (!d3.event.active) simulation.alphaTarget(0.3).restart();
    d3.event.subject.fx = d3.event.subject.x;
    d3.event.subject.fy = d3.event.subject.y;
}
|
||||
|
||||
// Drag move: keep the node fixed at the current pointer position.
function dragged() {
    d3.event.subject.fx = d3.event.x;
    d3.event.subject.fy = d3.event.y;
}
|
||||
|
||||
// Drag end: cool the simulation down and release the node.
function dragended() {
    if (!d3.event.active) simulation.alphaTarget(0);
    d3.event.subject.fx = null;
    d3.event.subject.fy = null;
}
|
||||
|
||||
// Draw one edge on the shared canvas context. Edges touching 127.0.0.1
// are red, edges touching 127.0.0.2 are green, all others grey.
function drawLink(d) {
    const touches = ip => d.target.id === ip || d.source.id === ip;
    context.beginPath();
    context.moveTo(d.source.x, d.source.y);
    context.lineTo(d.target.x, d.target.y);
    context.lineWidth = 3;
    context.strokeStyle = touches("127.0.0.1") ? "red"
                        : touches("127.0.0.2") ? "green"
                        : "#aaa";
    context.stroke();
}
|
||||
|
||||
// Draw one node as a filled circle with a white outline on the shared
// canvas context; 127.0.0.1 is red, 127.0.0.2 green, the rest black.
function drawNode(d) {
    let fill = "black";
    if (d.id === "127.0.0.1") {
        fill = "red";
    } else if (d.id === "127.0.0.2") {
        fill = "green";
    }
    context.beginPath();
    context.moveTo(d.x + 10, d.y);
    context.arc(d.x, d.y, 10, 0, 2 * Math.PI);
    context.lineWidth = 1;
    context.fillStyle = fill;
    context.fill();
    context.strokeStyle = "#fff";
    context.stroke();
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
182
tools/network_test.py
Normal file
182
tools/network_test.py
Normal file
|
@ -0,0 +1,182 @@
|
|||
import json
|
||||
import os
|
||||
import statistics
|
||||
import subprocess
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from threading import Thread, Lock
|
||||
|
||||
|
||||
def make_handler(timestamps, connectivity, num_nodes, callback):
    """Build a BaseHTTPRequestHandler subclass bound to shared test state.

    The handler appends the arrival time of each POSTed transaction
    notification to ``timestamps`` and stores PUT peer lists into
    ``connectivity``.  Once ``num_nodes`` notifications have arrived it
    closes the server and invokes ``callback(timestamps)``.
    """
    class TimestampHandler(BaseHTTPRequestHandler, object):
        def __init__(self, request, client_address, server):
            # Bind shared state before super().__init__, because the base
            # class handles the request inside its constructor.
            self.timestamps = timestamps
            super().__init__(request, client_address, server)

        def _send_response(self):
            # Minimal 200 OK so the notifying node does not retry.
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()

        def do_GET(self):
            print("Get request", str(self.path))
            self._send_response()

        def do_POST(self):
            # Body is the raw transaction hash; only the arrival time is
            # recorded, the hash is just logged.
            content_length = int(self.headers["Content-Length"])
            transaction_hash = self.rfile.read(content_length)

            t = time.time()
            self.timestamps.append(t)

            print(f"POST {transaction_hash.hex()} at {t}")
            print(f"Number of notifications: {len(self.timestamps)}/{num_nodes}")

            if len(self.timestamps) == num_nodes:
                print("Number of notifications is equal to the number of nodes; closing server and calling callback")
                # NOTE(review): server_close() is called from a handler while
                # serve_forever() runs on another thread; shutdown() is the
                # documented way to stop the loop — confirm this is intended.
                self.server.server_close()
                callback(self.timestamps)

            self._send_response()

        def do_PUT(self):
            # Body is JSON of the form {"node": <ip>, "peerlist": [<ip>, ...]}.
            content_length = int(self.headers["Content-Length"])
            peer_info_bytes = self.rfile.read(content_length)
            peer_info = json.loads(peer_info_bytes.decode(encoding="ascii"))

            connectivity[peer_info["node"]] = peer_info["peerlist"]

            self._send_response()

    return TimestampHandler
|
||||
|
||||
|
||||
def print_stats(timestamps, connectivity):
    """Print propagation statistics and the recorded peer connectivity.

    ``timestamps`` must hold at least two arrival times (seconds); the
    first one is treated as the reference point.
    """
    first, last = timestamps[0], timestamps[-1]
    deltas = [ts - first for ts in timestamps[1:]]

    print("-----------------------------------------")
    print(f"Received notification from {len(timestamps)} nodes")

    print("First timestamp at", first)
    print("Last timestamp at", last)
    print("Difference:", (last - first) * 1000, "ms")
    print()
    print("Median time it took:", statistics.median(deltas) * 1000, "ms")
    print("Average time it took:", statistics.mean(deltas) * 1000, "ms")

    print("Connectivity:")
    print(json.dumps(connectivity))
|
||||
|
||||
|
||||
def graph(timestamps):
    """Placeholder for graphing the collected timestamps; not implemented."""
    pass
|
||||
|
||||
|
||||
def start_node(miners, max_peers, interface, seed, delay):
    """Launch one aucoin node as a subprocess bound to ``interface``.

    Each node gets its own HOME under ~/.aucoin-test/<interface> so that
    per-node data directories do not collide.  Returns the Popen handle.
    """
    # NOTE(review): HOME is a literal "~/..." string; tilde expansion is a
    # shell feature, so the child process may see the unexpanded path —
    # confirm the node expands it (e.g. via os.path.expanduser).
    return subprocess.Popen(["python3.6", "-m", "aucoin",
                             "--miners", str(miners),
                             "--max-peers", str(max_peers),
                             "--interface", interface,
                             "--seed", seed,  # 192.0.2.0 doesn't exist according to RFC
                             "--notify-url", "http://localhost:8080",
                             "--delay", str(delay),
                             "--no-catch-up",
                             "-v",
                             "--clean"],
                            env=dict(os.environ, HOME=f"~/.aucoin-test/{interface}"))
|
||||
|
||||
|
||||
def run_experiment(num_nodes):
    """Measure transaction propagation time across ``num_nodes`` local nodes.

    For every (delay, max_peers) combination this starts ``num_nodes``
    aucoin nodes on loopback aliases (127.0.0.2..), asks the operator to
    run the seed node and send a transaction manually, and collects one
    notification timestamp per node via a local HTTP server (port 8080).
    Results are printed as JSON by the completion callback.
    """
    results = defaultdict(dict)  # results[delay][max_peers] = timestamps
    # Acquired before each combination and released by the callback, so the
    # next combination only starts after the current one has finished.
    continue_lock = Lock()
    for delay in [1]:
        for max_peers in (4,):
            continue_lock.acquire()
            print("==================================================================")
            print(f"Running experiment with {num_nodes} nodes at {delay}ms delay and max_peers={max_peers}")

            print("Please start seed node manually using:")
            print("VVV")
            print(f"python3.6 -m aucoin --miners=0 --max-peers={max_peers} --interface=127.0.0.1 --seed=192.0.2.0 --notify-url http://localhost:8080 --delay={delay} --no-catch-up -v")
            print("^^^")
            # input("Press any key when done to continue")

            #print("Waiting 1s for the seed node to fail connecting to non-existent seed..")
            #time.sleep(1)
            print("Continuing..")

            print("Starting other nodes..")
            nodes = []
            for n in range(2, num_nodes+2):  # start at 127.0.0.2
                node = start_node(miners=0, max_peers=max_peers, interface=f"127.0.0.{n}", seed="127.0.0.1", delay=delay)
                nodes.append(node)

            # BUG FIX: the message claimed 10s while the code sleeps 5s;
            # the message now matches the actual sleep.
            print("Sleeping 5s to allow establishing connections..")
            time.sleep(5)
            print("Continuing..")

            def all_nodes_received_transaction_callback(timestamps):
                # Runs on the HTTP server thread once every node (plus the
                # seed) has reported the transaction.
                print("Killing all nodes..")
                for node in nodes:
                    node.kill()

                time.sleep(1)
                print("VVV")
                print("Please Ctrl+C the seed node")
                print("^^^")
                input("Press any key when done..")

                print("Saving and printing stats..")
                results[delay][max_peers] = timestamps
                print_stats(timestamps, connectivity)

                print("=== RESULTS ===")
                print(json.dumps(results))
                print("===============")

                continue_lock.release()

            print("Starting web server in other thread")
            timestamps = []
            connectivity = {}
            http_server = HTTPServer(("0.0.0.0", 8080), make_handler(timestamps, connectivity, num_nodes + 1, all_nodes_received_transaction_callback))  # num_nodes + 1 accounts for seed node
            http_thread = Thread(target=http_server.serve_forever)
            http_thread.start()
            print("Server started on port 8080")

            print("Waiting 2s for the webserver to start up..")
            time.sleep(2)
            print("Continuing..")

            print("Please send a transaction from the seed node, e.g.:")
            print("VVV")
            print("send aabb 10")
            print("^^^")

            print("Waiting for all peers to receive transaction..")
            for node in nodes:
                node.wait()

            # The flow continues in all_nodes_received_transaction_callback()
|
||||
|
||||
|
||||
def plot():
    """Plot median and worst-case propagation times against network size.

    The data points were collected manually from earlier runs of
    run_experiment() with varying node counts.
    """
    times = [962, 843, 1329, 1142, 1078, 1223, 1406, 1492]    # median, ms
    worst = [1879, 1979, 2039, 2149, 2185, 2655, 2304, 3190]  # last node, ms
    nodes = [10, 20, 30, 50, 75, 100, 125, 150]

    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    plt.plot(nodes, times, 'r-x', nodes, worst, 'b-x')
    plt.xlabel('number of peers')
    plt.ylabel('time in milliseconds from transaction was sent')
    red = mpatches.Patch(color='red', label='median')
    # renamed from `green`: the patch is blue, the old name was misleading
    blue = mpatches.Patch(color='blue', label='last record')
    plt.legend(handles=[blue, red])
    plt.show()
|
||||
|
||||
if __name__ == '__main__':
    # Run the propagation experiment; switch to plot() to chart saved results.
    run_experiment(num_nodes=70)
    # plot()
|
57
tools/plotting.py
Normal file
57
tools/plotting.py
Normal file
|
@ -0,0 +1,57 @@
|
|||
import json
|
||||
from collections import deque
|
||||
from statistics import mean
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
from aucoin import config
|
||||
|
||||
|
||||
# Load the per-block statistics dumped by the node (one JSON object per
# line) and plot difficulty, block timespans and their running averages
# against block timestamps on shared x / twin y axes.

timestamps = []  # block header timestamps (x axis)
data = []        # per-line "data" dicts from stats.json
with open(config.data_dir.joinpath("statistics/stats.json")) as file:
    for line in file.readlines():
        stat = json.loads(line)

        d = stat["data"]
        timestamp = d["blockchain"]["header"]["timestamp"]

        timestamps.append(timestamp)
        data.append(d)


# remove first 10 data points because the timestamp of the genesis block may cause inaccuracies
timestamps = timestamps[10:]
data = data[10:]

# plot
fig, ax1 = plt.subplots()

ax1.set_xlabel('timestamp (UTC)')

ax1.plot(timestamps, [d["blockchain"]["average_block_timespan"]["since_genesis"] for d in data], 'c--')
ax1.set_ylabel('average block timespan since genesis', color='c')
ax1.tick_params('y', colors='c')

# twin axes share the x axis but carry independent y scales
ax2 = ax1.twinx()
ax2.plot(timestamps, [d["blockchain"]["header"]["difficulty"] for d in data], 'r-')
ax2.set_ylabel('difficulty', color='r')
ax2.tick_params('y', colors='r')

ax3 = ax1.twinx()
ax3.plot(timestamps, [d["blockchain"]["header"]["timespan"] for d in data], 'g.')
ax3.set_ylabel('timespan last block', color='g')
ax3.tick_params('y', colors='g')

# ax4 = ax2.twinx()
# ax4.plot(timestamps, [d.hashrate for d in data], 'y-')
# ax4.set_ylabel('hashrate', color='y')
# ax4.tick_params('y', colors='y')

ax5 = ax2.twinx()
ax5.plot(timestamps, [d["blockchain"]["average_block_timespan"]["last_100"] for d in data], 'b-')
ax5.set_ylabel('average block timespan last 100', color='b')
ax5.tick_params('y', colors='b')

fig.tight_layout()
plt.show()
|
166
tools/simulator.py
Normal file
166
tools/simulator.py
Normal file
|
@ -0,0 +1,166 @@
|
|||
import logging.config
|
||||
import shutil
|
||||
import math
|
||||
from collections import deque
|
||||
from collections import namedtuple
|
||||
from random import random
|
||||
from statistics import mean
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.mlab as mlab
|
||||
import numpy
|
||||
|
||||
from aucoin import config
|
||||
from aucoin import consensus
|
||||
from aucoin import util
|
||||
from aucoin.block import Block
|
||||
from aucoin.blockchain import Blockchain
|
||||
from aucoin.database import session_scope
|
||||
from aucoin.transactions import CoinbaseTransaction
|
||||
|
||||
logging.config.dictConfig(config.logging(console_level="DEBUG"))
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def simulate_mining(iterations=10_000, hashrate=100_000):
    """Simulate mining ``iterations`` blocks against a drifting hashrate.

    Builds a fresh blockchain in a scratch data directory, draws each
    block's timespan from a Poisson distribution around
    difficulty/hashrate, and plots how the difficulty adjustment tracks
    the target block time, plus a histogram of observed block times.
    """
    # start from a clean data directory
    shutil.rmtree(config.data_dir, ignore_errors=True)
    util.make_data_dirs("logs/")

    time = 0  # simulated clock, seconds since genesis (shadows any `time` module name on purpose here)
    blockchain = Blockchain()

    timestamps = []
    data = []
    dq = deque(maxlen=100)  # sliding window for the last-100 average timespan
    Stat = namedtuple("Stat", ("difficulty", "timespan", "average_timespan_total", "average_timespan_dq", "hashrate"))

    with session_scope() as session:
        blockchain.genesis_block(session).timestamp = 0
        time += 1
        hash_rates = generate_hashrates(n=iterations, hashrate=hashrate)
        for i in range(iterations):
            print("Iteration:", i)
            hashrate = hash_rates[i]
            prev_block = blockchain.header(session)
            # empty block: a zero-value coinbase is enough for the difficulty logic
            block = Block(
                hash_prev_block=prev_block._hash,
                timestamp=time,
                transactions=[CoinbaseTransaction(
                    address=b"",
                    value=0,
                    block_height=prev_block.height + 1
                )]
            )
            block.target = consensus.required_target(block, blockchain, session, 2, 0, 0)

            # block.difficulty is the expected/average number of attempts that were necessary to mine it.
            # https://bitcoin.stackexchange.com/questions/25293/probablity-distribution-of-mining
            # https://bitcoin.stackexchange.com/questions/4690/what-is-the-standard-deviation-of-block-generation-times
            # http://r6.ca/blog/20180225T160548Z.html
            time += numpy.random.poisson(block.difficulty // hashrate)

            blockchain.add(block, session)
            blockchain.set_header(block, session)

            # save data
            timestamps.append(time)
            timespan = block.timestamp - prev_block.timestamp
            dq.append(timespan)
            data.append(Stat(
                block.difficulty,
                timespan,
                blockchain.average_block_timespan(session),
                mean(dq),
                hashrate
            ))
    # The triple-quoted block below is an older plotting variant kept for
    # reference; it is a no-op string expression at runtime.
    """
    # plot
    fig, ax1 = plt.subplots()
    ax1.plot(timestamps, [d.average_timespan_total for d in data], 'c--')
    ax1.set_xlabel('time (s)')
    # Make the y-axis label, ticks and tick labels match the line color.
    ax1.set_ylabel('average block timespan since genesis', color='c')
    ax1.tick_params('y', colors='c')

    ax2 = ax1.twinx()
    ax2.plot(timestamps, [d.difficulty for d in data], 'r-')
    ax2.set_ylabel('difficulty', color='r', labelpad=20)
    ax2.tick_params('y', colors='r')

    #ax3 = ax1.twinx()
    #ax3.plot(timestamps, [d.timespan for d in data], 'g', marker=",", linestyle="")
    #ax3.set_ylabel('timespan last block', color='g')
    #ax3.tick_params('y', colors='g')

    ax4 = ax2.twinx()
    ax4.plot(timestamps, [d.hashrate for d in data], 'y-')
    ax4.set_ylabel('hashrate', color='y')
    ax4.tick_params('y', colors='y')

    ax5 = ax1.twiny()
    ax2.yaxis.set_visible(True)
    ax5.plot(timestamps, [d.average_timespan_dq for d in data], 'b-')
    ax5.set_ylabel('average block timespan last 100', color='b')
    ax5.tick_params('y', colors='b')

    ax1.plot(timestamps, [60 for t in timestamps], '-k')

    fig.tight_layout()
    """

    # upper subplot: running averages vs. the target block time, with
    # difficulty and hashrate on twin y axes
    plt.subplot(211)

    genesis, = plt.plot(timestamps, [d.average_timespan_total for d in data], 'c--', label='average block timespan since genesis')

    avg, = plt.plot(timestamps, [d.average_timespan_dq for d in data], 'b-', label='average block timespan last 100')
    target, = plt.plot(timestamps, [60 for t in timestamps], '-k', label='target block time')
    plt.gca().set_ylabel('time (s)')
    plt.gca().set_xlabel('time (s)')

    ax1 = plt.gca()

    ax2 = ax1.twinx()

    ax2.plot(timestamps, [d.difficulty for d in data], 'r-')
    ax2.set_ylabel('difficulty', color='r')
    ax2.tick_params('y', colors='r')

    ax4 = ax1.twinx()
    ax4.plot(timestamps, [d.hashrate for d in data], 'y-')
    ax4.set_ylabel('hashrate', color='y')
    ax4.tick_params('y', colors='y', pad=50)

    plt.legend(handles=[genesis, avg, target],
               bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               borderaxespad=0.)

    # lower subplot: histogram of observed block times compared with a
    # Poisson distribution with mean 60s
    plt.subplot(212)
    data = [d.timespan for d in data]
    n, bins, _ = plt.hist(data, bins=len(numpy.unique(data)), density=True, stacked=True)

    mu = 60
    y = [(math.exp(-mu) * mu**i / math.factorial(i)) for i in range(len(bins))]
    plt.plot(bins, y, 'y--')
    plt.gca().set_xlabel('block time (s)')
    plt.gca().set_ylabel('percentage of mined blocks')

    plt.gcf().tight_layout()
    plt.show()
|
||||
|
||||
def generate_hashrates(hashrate=100_000, n=10_000, ret=5.42123/365/60/24, vol=0.14789/60):
    """Random walk of the network hashrate over ``n`` steps.

    Monte Carlo simulation based on
    http://www.pythonforfinance.net/2016/11/28/monte-carlo-simulation-in-python/
    The defaults for ``ret`` and ``vol`` are the return and volatility of
    the bitcoin hash rate between may 2017 and may 2018, scaled from one
    year (365 days * 60 * 24) down to per-minute steps.

    Returns a list of n + 1 values starting at ``hashrate``.
    """
    multipliers = numpy.random.normal(ret, vol, n) + 1

    hash_rates = [hashrate]
    for m in multipliers:
        hash_rates.append(hash_rates[-1] * m)

    #plt.plot(hash_rates)
    #plt.show()
    return hash_rates
|
||||
|
||||
# Fixed seed so simulation runs (and plots) are reproducible.
numpy.random.seed(6)
#generate_hashrates()
simulate_mining(3000)
|
56
tools/stress.py
Normal file
56
tools/stress.py
Normal file
|
@ -0,0 +1,56 @@
|
|||
import logging
|
||||
import random
|
||||
import time
|
||||
from threading import Thread
|
||||
|
||||
from aucoin.core import Core
|
||||
from aucoin.database import session_scope
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Stressor(object):
    """Background load generator: continuously sends random transactions.

    Spawns a daemon thread that, whenever the wallet balance allows it,
    sends transactions of random amount (10-100) and fee (0-10) to either
    a fixed receiver address or a randomly chosen on-chain address.
    """

    def __init__(self, core: Core, receiver_address=None):
        self.core = core
        # May be None; in that case sender() picks a random on-chain address.
        self.receiver_address = receiver_address

        logger.info("Starting stressor")

        # daemon=True so the stressor never keeps the process alive
        Thread(target=self.sender, daemon=True).start()

    def sender(self):
        """Thread body: send transactions forever, checking the wallet every second."""
        # Receiver address is either provided by parameter or chosen at random. We set it here (in the thread) instead
        # of in the constructor because random_address() might loop.
        receiver_address = self.receiver_address or self.random_address()
        logger.info("Chosen receiver address: %s", receiver_address.hex())

        while True:
            time.sleep(1)

            with self.core.lock:
                balance = sum(self.core.wallet.balance)  # sum of confirmed and unconfirmed balance

                # 110 = max amount (100) + max fee (10), so a single send
                # can never exceed the locally tracked balance
                while balance >= 110:
                    amount = random.randint(10, 100)
                    fee = random.randint(0, 10)

                    logger.info("Sending %s auc to %s with %s auc fee", amount, receiver_address.hex(), fee)
                    self.core.wallet.make_transaction(receiver_address, amount, fee)

                    balance -= amount + fee

    def random_address(self):
        """Block until some foreign address exists on the chain, then return one."""
        logger.debug("Choosing random receiver address..")
        # Loop until we get an address because the blockchain might be empty (or only contain our addresses), in which
        # case we need to keep trying until somebody else mines a block.
        receiver_address = None
        while receiver_address is None:
            with session_scope() as session:
                # Known addresses are all addresses on the chain minus the address of the genesis block and ourselves
                known_addresses = self.core.blockchain.known_addresses(session) - {bytes(32)} - self.core.wallet.addresses

                if known_addresses:
                    return random.choice(tuple(known_addresses))  # random.choice doesn't work with sets

            time.sleep(1)
            continue
|
Loading…
Reference in a new issue