Signed-off-by: eternal-flame-AD <yume@yumechi.jp>
This commit is contained in:
ゆめ 2024-11-02 19:50:14 -05:00
commit 5095bcdcdd
No known key found for this signature in database
17 changed files with 6242 additions and 0 deletions

1
.gitignore vendored Normal file
View file

@ -0,0 +1 @@
/target

2683
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

54
Cargo.toml Normal file
View file

@ -0,0 +1,54 @@
# Package manifest for replikey: a Misskey logical-replication helper that
# combines a PKI workflow, an mTLS proxy, and DB setup automation behind
# feature flags (see [features] below and README.md).
[package]
name = "replikey"
version = "0.1.0"
edition = "2021"
[dependencies]
# CLI argument parsing with derive macros.
clap = { version = "4.5.20", features = ["derive"] }
env_logger = "0.11.5"
log = "0.4"
# OS-backed randomness (getrandom) for key material.
rand_core = { version = "0.6.4", features = ["getrandom"] }
# X.509 parsing for inspecting CSRs/certificates.
x509-parser = "0.16.0"
thiserror = "1.0.66"
# The optional dependencies below are activated by the [features] table;
# none of them compile unless the corresponding feature is enabled.
time = { version = "0.3.36", optional = true }
aes-gcm = { version = "0.10.3", optional = true }
sha2 = { version = "0.10.8", optional = true }
argon2 = { version = "0.5.3", optional = true }
rpassword = { version = "7.3.1", optional = true }
# Certificate/CSR generation -- pulled in by the "keygen" feature.
rcgen = { version = "0.13.1", optional = true, features = ["crypto", "pem", "x509-parser"] }
pem-rfc7468 = { version = "0.7.0", features = ["alloc"], optional = true }
toml = { version = "0.8.19", optional = true }
# HTTP client for fetching remote CRLs ("remote-crl" feature); rustls-only TLS.
reqwest = { version = "0.12.9", optional = true, default-features = false, features = ["rustls-tls"] }
# Only used for cross-checking outputs in tests ("test-crosscheck-openssl").
openssl = { version = "0.10.68", optional = true }
tokio-rustls = { version = "0.26.0", optional = true }
serde = { version = "1.0.214", features = ["derive"], optional = true }
# NOTE(review): sqlx runs with tls-none -- presumably TLS is terminated by the
# replikey proxy in front of Postgres; confirm against the networking code.
sqlx = { version = "0.8.2", optional = true, default-features = false, features = ["tls-none", "postgres"] }
tokio = { version = "1.41.0", features = ["rt", "rt-multi-thread", "macros", "net", "io-util", "sync"], optional = true }
rustls = { version = "0.23.16", optional = true }
# zstd stream compression over the tunnel (README: "zstd wrapper around rustls").
async-compression = { version = "0.4.17", optional = true, features = ["tokio", "zstd"] }
[features]
default = ["keygen", "networking", "service", "remote-crl", "setup-postgres"]
# Async runtime support; prerequisite of "networking".
asyncio = ["dep:tokio"]
# CA / server / client key and certificate generation workflow.
keygen = ["dep:rcgen", "dep:pem-rfc7468", "dep:rpassword", "dep:argon2", "dep:sha2", "dep:aes-gcm", "dep:time"]
# mTLS forward/reverse proxy support.
networking = ["asyncio", "dep:tokio-rustls", "dep:rustls", "dep:async-compression"]
test-crosscheck-openssl = ["dep:openssl"]
serde = ["dep:serde"]
# Long-running proxy service configured via TOML files or environment.
service = ["serde", "networking", "dep:toml"]
remote-crl = ["dep:reqwest"]
setup-postgres = ["dep:sqlx"]
stat-service = ["networking", "serde"]
rustls = ["dep:rustls"]
async-compression = ["dep:async-compression"]
[[bin]]
name = "replikey"
path = "src/bin/replikey.rs"
# The CLI binary needs the certificate tooling to build.
required-features = ["keygen"]
[dev-dependencies]
tempfile = "3.13.0"
[profile.release]
# Link-time optimization for smaller/faster release binaries.
lto = true

201
LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2024] [Yumechi yume@yumechi.jp]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

151
README.md Normal file
View file

@ -0,0 +1,151 @@
# replikey
A logical replication tool for Misskey, designed for replicating over insecure connections.
Current development status:
- Automatically tested: Steps 1, 2, 3, 4, 5
- Works on my machine: Steps 6, 7, 8
- Docs: Later :)
## Architecture
This is essentially a DB configuration tool, a Web PKI CA workflow, and an mTLS proxy combined into one, with feature flags to enable or disable each part.
Network architecture is as follows:
![Network Architecture](./doc/architecture.svg)
For Postgres, the tool automates setting up official logical replication; for Redis it is just one command, so that step is not automated.
## Setup Workflow
Overview of the steps:
1. Create a root CA for authenticating the server and client
2. Create a server certificate for the Misskey instance
3. Create a client certificate for the replication client
4. Sign the server and client certificates
5. Set up Postgres for replication
6. Test the connection
7. Integrate the program into docker-compose
8. Start the replication
### 1. Create a root CA (you will be prompted for a password to encrypt the key)
```sh
replikey cert create-ca --valid-days 1825 --dn-common-name "MyInstance Replication Root Certificate Authority" -o ca-certs
```
### 2. Create a server CSR, SAN can be any number of combinations of DNS and IP addresses
If you use a DNS name SAN, every SNI you later use must match one of the DNS names or wildcards in the SAN.
If you use an IP address SAN, all connections (supposedly) to that IP address will be considered to come from your server.
```sh
replikey cert create-server --valid-days 365 --dn-common-name "MyInstance Production Server" -d '*.replication.myinstance.com' --ip-address 123.123.123.123 -o server-certs
```
### 3. Sign the server CSR
```sh
replikey cert sign-server-csr --valid-days 365 --ca-dir ca-certs --input-csr server-certs/server.csr --output server-certs-signed.pem
Enter password:
CSR Params:
Serial number: 7b6a82c3d9171f7ba8fbd8973aac0146dac611dd
SAN: DNS=*.replication.myinstance.com
SAN: IP=123.123.123.123
Not before: 2024-11-02 22:43:56.751788095 +00:00:00
Not after: 2025-11-02 22:43:56.751783366 +00:00:00
Distinguished name: DistinguishedName { entries: {CommonName: Utf8String("MyInstance Production Server")}, order: [CommonName] }
Key usages: [DigitalSignature, DataEncipherment]
Extended key usages: [ServerAuth]
CRL distribution points: []
Do you want to sign this CSR? (YES/NO)
IMPORTANT: Keep this certificate or its serial number for revocation
```
### 4. Create a client CSR (for each client)
Ideally the workflow is the client should generate their own CSR and send it to you, you sign the certificate and send it back to them.
```sh
replikey cert create-client --valid-days 365 \
--dn-common-name "MyInstance Replication Client" \
-o client-certs
```
### 5. Sign the client CSR (for each client)
```sh
replikey cert sign-client-csr --valid-days 365 \
--ca-dir ca-certs \
--input-csr client-certs/client.csr \
--output client-certs-signed.pem
```
### BTW.0 Later if you want to revoke a certificate, generate a CRL with the following command, then pass a URL or path to the CRL(s) to any networking command via the --crl option
replikey cert generate-crl --ca-dir ca-certs --serial abcdef --serial 123456 --output revoked.crl
### 6. Check your certificates can communicate, this is just a zstd wrapper around rustls, so you should be able to use any TLS client or server
```sh
replikey network reverse-proxy --listen 0.0.0.0:8443 \
--redis-sni localhost --redis-target 127.0.0.1:22 \
--postgres-sni postgres --postgres-target 127.0.0.1:8441 \
--cert server-signed.pem --key test-server/server.key \
--ca test-ca/ca.pem &
# this SNI MUST match one of the DNS names in the server certificate, unless the IP address itself is signed (not recommended)
replikey network forward-proxy --listen 0.0.0.0:8444 \
--sni localhost --target localhost:8443 \
--cert client-signed.pem --key test-client/client.key \
--ca test-ca/ca.pem &
ssh -p8444 localhost # this should work
```
### 7. Prepare the replication server for connection
Login to your master Misskey instance postgres and create a user for connection. You do not have to and should not grant any permissions to the replication user
```sql
CREATE ROLE replication WITH REPLICATION LOGIN ENCRYPTED PASSWORD 'password';
```
### BTW.1 Table names for checking replication status
```
pg_catalog.pg_publication
pg_catalog.pg_subscription
pg_catalog.pg_stat_subscription
```
### 8. Create postgres publication on the master side
```sh
# DATABASE_URL should be _the_ connection string Misskey uses to connect to the database
replikey setup-postgres-master setup --must-not-exist --publication "my_name"
replikey setup-postgres-master drop-table --publication "my_name" -t auth_session -t password_reset_request -t access_token
```
### 9. Prepare postgres slave on the slave side
```sh
# DATABASE_URL should be any valid connection string to the master database, probably the user you created in step 7
replikey setup-postgres-slave setup --must-not-exist --subscription "my_subscription_name" --publication "my_name"
```
### 10. Set redis slave to replicate from the master
```sh
# replace REDIS_PROXY with the address of the redis TLS proxy listener
# replace PORT with the port of the redis TLS proxy listener
redis-cli REPLICAOF REDIS_PROXY PORT
```
### Integration into docker-compose:
WIP, but I have `replikey service` subcommand for running the proxies with environment variables or config files and optionally set up the replication on startup.

250
deny.toml Normal file
View file

@ -0,0 +1,250 @@
# This template contains all of the possible sections and their default values
# Note that all fields that take a lint level have these possible values:
# * deny - An error will be produced and the check will fail
# * warn - A warning will be produced, but the check will not fail
# * allow - No warning or error will be produced, though in some cases a note
# will be emitted
# The values provided in this template are the default values that will be used
# when any section or field is not specified in your own configuration
# Root options
# The graph table configures how the dependency graph is constructed and thus
# which crates the checks are performed against
[graph]
# If 1 or more target triples (and optionally, target_features) are specified,
# only the specified targets will be checked when running `cargo deny check`.
# This means, if a particular package is only ever used as a target specific
# dependency, such as, for example, the `nix` crate only being used via the
# `target_family = "unix"` configuration, that only having windows targets in
# this list would mean the nix crate, as well as any of its exclusive
# dependencies not shared by any other crates, would be ignored, as the target
# list here is effectively saying which targets you are building for.
targets = [
# The triple can be any string, but only the target triples built in to
# rustc (as of 1.40) can be checked against actual config expressions
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
# You can also specify which target_features you promise are enabled for a
# particular target. target_features are currently not validated against
# the actual valid features supported by the target architecture.
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
]
# When creating the dependency graph used as the source of truth when checks are
# executed, this field can be used to prune crates from the graph, removing them
# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate
# is pruned from the graph, all of its dependencies will also be pruned unless
# they are connected to another crate in the graph that hasn't been pruned,
# so it should be used with care. The identifiers are [Package ID Specifications]
# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html)
#exclude = []
# If true, metadata will be collected with `--all-features`. Note that this can't
# be toggled off if true, if you want to conditionally enable `--all-features` it
# is recommended to pass `--all-features` on the cmd line instead
all-features = true
# If true, metadata will be collected with `--no-default-features`. The same
# caveat with `all-features` applies
no-default-features = false
# If set, these feature will be enabled when collecting metadata. If `--features`
# is specified on the cmd line they will take precedence over this option.
#features = []
# The output table provides options for how/if diagnostics are outputted
[output]
# When outputting inclusion graphs in diagnostics that include features, this
# option can be used to specify the depth at which feature edges will be added.
# This option is included since the graphs can be quite large and the addition
# of features from the crate(s) to all of the graph roots can be far too verbose.
# This option can be overridden via `--feature-depth` on the cmd line
feature-depth = 1
# This section is considered when running `cargo deny check advisories`
# More documentation for the advisories section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
[advisories]
# The path where the advisory databases are cloned/fetched into
#db-path = "$CARGO_HOME/advisory-dbs"
# The url(s) of the advisory databases to use
#db-urls = ["https://github.com/rustsec/advisory-db"]
# A list of advisory IDs to ignore. Note that ignored advisories will still
# output a note when they are encountered.
ignore = [
#"RUSTSEC-0000-0000",
#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" },
#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish
#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },
]
# If this is true, then cargo deny will use the git executable to fetch advisory database.
# If this is false, then it uses a built-in git library.
# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support.
# See Git Authentication for more information about setting up git authentication.
#git-fetch-with-cli = true
# This section is considered when running `cargo deny check licenses`
# More documentation for the licenses section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
[licenses]
# List of explicitly allowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
allow = [
"CC0-1.0",
"MIT",
"MPL-2.0",
"Apache-2.0",
"Unicode-DFS-2016",
"ISC",
"BSD-3-Clause",
"OpenSSL",
"Zlib",
]
# The confidence threshold for detecting a license from license text.
# The higher the value, the more closely the license text must be to the
# canonical license text of a valid SPDX license file.
# [possible values: any between 0.0 and 1.0].
confidence-threshold = 0.8
# Allow 1 or more licenses on a per-crate basis, so that particular licenses
# aren't accepted for every possible crate as with the normal allow list
exceptions = [
# Each entry is the crate and version constraint, and its specific allow
# list
#{ allow = ["Zlib"], crate = "adler32" },
]
# Some crates don't have (easily) machine readable licensing information,
# adding a clarification entry for it allows you to manually specify the
# licensing information
#[[licenses.clarify]]
# The package spec the clarification applies to
#crate = "ring"
# The SPDX expression for the license requirements of the crate
#expression = "MIT AND ISC AND OpenSSL"
# One or more files in the crate's source used as the "source of truth" for
# the license expression. If the contents match, the clarification will be used
# when running the license check, otherwise the clarification will be ignored
# and the crate will be checked normally, which may produce warnings or errors
# depending on the rest of your configuration
#license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
#{ path = "LICENSE", hash = 0xbd0eed23 }
#]
[[licenses.clarify]]
crate = "ring"
expression = "MIT AND ISC AND OpenSSL"
license-files = [
{ path = "LICENSE", hash = 0xbd0eed23 }
]
[licenses.private]
# If true, ignores workspace crates that aren't published, or are only
# published to private registries.
# To see how to mark a crate as unpublished (to the official registry),
# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.
ignore = false
# One or more private registries that you might publish crates to, if a crate
# is only published to private registries, and ignore is true, the crate will
# not have its license(s) checked
registries = [
#"https://sekretz.com/registry"
]
# This section is considered when running `cargo deny check bans`.
# More documentation about the 'bans' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
[bans]
# Lint level for when multiple versions of the same crate are detected
multiple-versions = "warn"
# Lint level for when a crate version requirement is `*`
wildcards = "allow"
# The graph highlighting used when creating dotgraphs for crates
# with multiple versions
# * lowest-version - The path to the lowest versioned duplicate is highlighted
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# The default lint level for `default` features for crates that are members of
# the workspace that is being checked. This can be overridden by allowing/denying
# `default` on a crate-by-crate basis if desired.
workspace-default-features = "allow"
# The default lint level for `default` features for external crates that are not
# members of the workspace. This can be overridden by allowing/denying `default`
# on a crate-by-crate basis if desired.
external-default-features = "allow"
# List of crates that are allowed. Use with care!
allow = [
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" },
]
# List of crates to deny
deny = [
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" },
# Wrapper crates can optionally be specified to allow the crate when it
# is a direct dependency of the otherwise banned crate
#{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] },
]
# List of features to allow/deny
# Each entry the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#[[bans.features]]
#crate = "reqwest"
# Features to not allow
#deny = ["json"]
# Features to allow
#allow = [
# "rustls",
# "__rustls",
# "__tls",
# "hyper-rustls",
# "rustls",
# "rustls-pemfile",
# "rustls-tls-webpki-roots",
# "tokio-rustls",
# "webpki-roots",
#]
# If true, the allowed features must exactly match the enabled feature set. If
# this is set there is no point setting `deny`
#exact = true
# Certain crates/versions that will be skipped when doing duplicate detection.
skip = [
"hashbrown",
"sync_wrapper",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" },
]
# Similarly to `skip` allows you to skip certain crates during duplicate
# detection. Unlike skip, it also includes the entire tree of transitive
# dependencies starting at the specified crate, up to a certain depth, which is
# by default infinite.
skip-tree = [
#"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies
#{ crate = "ansi_term@0.11.0", depth = 20 },
]
# This section is considered when running `cargo deny check sources`.
# More documentation about the 'sources' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
[sources]
# Lint level for what to happen when a crate from a crate registry that is not
# in the allow list is encountered
unknown-registry = "warn"
# Lint level for what to happen when a crate from a git repository that is not
# in the allow list is encountered
unknown-git = "warn"
# List of URLs for allowed crate registries. Defaults to the crates.io index
# if not specified. If it is specified but empty, no registries are allowed.
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
# List of URLs for allowed Git repositories
allow-git = []
[sources.allow-org]
# 1 or more github.com organizations to allow git sources for
github = []
# 1 or more gitlab.com organizations to allow git sources for
gitlab = []
# 1 or more bitbucket.org organizations to allow git sources for
bitbucket = []

60
doc/architecture.gv Normal file
View file

@ -0,0 +1,60 @@
// Network architecture diagram for replikey (source of doc/architecture.svg).
// Three clusters: the PKI that issues certificates, the master docker-compose
// stack that terminates TLS, and the slave stack that dials out over mTLS.
digraph {
// --- PKI: the CA key plus optional CRL hosting. ---
subgraph cluster_pki {
label="PKI"
ca [label="CA Key", shape=note]
subgraph cluster_pki_crl {
label="CRL Infrastructure(Optional)"
crl_listener [label="http://my.crl", shape=triangle,rank=0]
// `crl` is implicitly declared by this edge; revocation lists are
// published as static files behind the HTTP listener.
crl -> crl_listener [label="Static file"]
}
}
// --- Master side: replikey proxies inbound TLS to Postgres/Redis by SNI. ---
subgraph cluster_0 {
label="Master docker compose"
web_app [label="Web app", shape=box]
db [label="Postgres", shape=box]
redis [label="Redis", shape=box]
replikey [label="Replikey", shape=box]
// Orange edges: plaintext hops routed by SNI inside the compose network.
replikey -> db [label="SNI Routing",color=orange]
replikey -> redis [label="SNI Routing",color=orange]
server_cert [label="Server cert", shape=note]
server_key [label="Server key", shape=note]
server_key -> server_cert [label="Private key"]
web_app -> db
web_app -> redis
ca_cert [label="CA cert", shape=note]
// The proxy authenticates with the server cert and trusts the CA cert
// for verifying client certificates.
server_cert -> replikey [label="Authenticate"]
ca_cert -> replikey [label="Trust"]
// Triangles mark externally exposed listening ports.
listen_master_web [label=":80", shape=triangle,rank=0]
listen_master_replikey [label=":6443", shape=triangle,rank=0]
replikey -> listen_master_replikey [label="Listen",dir=back]
web_app -> listen_master_web [label="Listen"]
}
// --- Slave side: one replikey client per replicated service. ---
subgraph cluster_1 {
label="Slave docker compose"
db_slave [label="Postgres", shape=box]
redis_slave [label="Redis", shape=box]
replikey_slave_db [label="Replikey DB Client", shape=box]
replikey_slave_redis [label="Replikey Redis Client", shape=box]
// Local services talk plain TCP to their replikey client.
db_slave -> replikey_slave_db [label="Plain TCP",color=orange]
redis_slave -> replikey_slave_redis [label="Plain TCP",color=orange]
client_cert [label="Client cert", shape=note]
client_key [label="Client key", shape=note]
client_key -> client_cert [label="Private key"]
ca_cert_slave [label="CA cert", shape=note]
client_cert -> replikey_slave_db [label="Authenticate"]
ca_cert_slave -> replikey_slave_db [label="Trust"]
client_cert -> replikey_slave_redis [label="Authenticate"]
ca_cert_slave -> replikey_slave_redis [label="Trust"]
}
// Cross-cluster mTLS connections from the slave clients to the master
// listener; constraint=false keeps them from affecting cluster layout.
replikey_slave_db -> listen_master_replikey [label="TLS with SNI",constraint=false,color=green]
replikey_slave_redis -> listen_master_replikey [label="TLS with SNI",constraint=false,color=green]
}

385
doc/architecture.svg Normal file
View file

@ -0,0 +1,385 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 12.1.2 (0)
-->
<!-- Pages: 1 -->
<svg width="1229pt" height="431pt"
viewBox="0.00 0.00 1229.00 430.50" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 426.5)">
<polygon fill="white" stroke="none" points="-4,4 -4,-426.5 1225,-426.5 1225,4 -4,4" />
<g id="clust1" class="cluster">
<title>cluster_pki</title>
<polygon fill="none" stroke="black" points="8,-196 8,-414.5 336,-414.5 336,-196 8,-196" />
<text text-anchor="middle" x="172" y="-397.2" font-family="Times,serif"
font-size="14.00">PKI</text>
</g>
<g id="clust2" class="cluster">
<title>cluster_pki_crl</title>
<polygon fill="none" stroke="black" points="16,-204 16,-382 260,-382 260,-204 16,-204" />
<text text-anchor="middle" x="138" y="-364.7" font-family="Times,serif"
font-size="14.00">CRL Infrastructure (Optional)</text>
</g>
<g id="clust3" class="cluster">
<title>cluster_0</title>
<polygon fill="none" stroke="black" points="809,-8 809,-382 1213,-382 1213,-8 809,-8" />
<text text-anchor="middle" x="1011" y="-364.7" font-family="Times,serif"
font-size="14.00">Master docker compose</text>
</g>
<g id="clust4" class="cluster">
<title>cluster_1</title>
<polygon fill="none" stroke="black"
points="344,-109.5 344,-382 801,-382 801,-109.5 344,-109.5" />
<text text-anchor="middle" x="572.5" y="-364.7" font-family="Times,serif"
font-size="14.00">Slave docker compose</text>
</g>
<!-- ca -->
<g id="node1" class="node">
<title>ca</title>
<polygon fill="none" stroke="black"
points="322.12,-349.5 267.88,-349.5 267.88,-313.5 328.12,-313.5 328.12,-343.5 322.12,-349.5" />
<polyline fill="none" stroke="black" points="322.12,-349.5 322.12,-343.5" />
<polyline fill="none" stroke="black" points="328.12,-343.5 322.12,-343.5" />
<text text-anchor="middle" x="298" y="-326.45" font-family="Times,serif"
font-size="14.00">CA Key</text>
</g>
<!-- crl_listener -->
<g id="node2" class="node">
<title>crl_listener</title>
<polygon fill="none" stroke="black"
points="138,-261 24.04,-224.25 251.96,-224.25 138,-261" />
<text text-anchor="middle" x="138" y="-231.45" font-family="Times,serif"
font-size="14.00">http://my.crl</text>
</g>
<!-- crl -->
<g id="node3" class="node">
<title>crl</title>
<ellipse fill="none" stroke="black" cx="138" cy="-331.5" rx="27" ry="18" />
<text text-anchor="middle" x="138" y="-326.45" font-family="Times,serif"
font-size="14.00">crl</text>
</g>
<!-- crl&#45;&gt;crl_listener -->
<g id="edge1" class="edge">
<title>crl&#45;&gt;crl_listener</title>
<path fill="none" stroke="black" d="M138,-313.01C138,-301.58 138,-286.26 138,-272.41" />
<polygon fill="black" stroke="black"
points="141.5,-272.75 138,-262.75 134.5,-272.75 141.5,-272.75" />
<text text-anchor="middle" x="164.25" y="-282.2" font-family="Times,serif"
font-size="14.00">Static file</text>
</g>
<!-- web_app -->
<g id="node4" class="node">
<title>web_app</title>
<polygon fill="none" stroke="black"
points="957,-153.5 893,-153.5 893,-117.5 957,-117.5 957,-153.5" />
<text text-anchor="middle" x="925" y="-130.45" font-family="Times,serif"
font-size="14.00">Web app</text>
</g>
<!-- db -->
<g id="node5" class="node">
<title>db</title>
<polygon fill="none" stroke="black"
points="986.88,-58.5 925.12,-58.5 925.12,-22.5 986.88,-22.5 986.88,-58.5" />
<text text-anchor="middle" x="956" y="-35.45" font-family="Times,serif"
font-size="14.00">Postgres</text>
</g>
<!-- web_app&#45;&gt;db -->
<g id="edge5" class="edge">
<title>web_app&#45;&gt;db</title>
<path fill="none" stroke="black"
d="M924.71,-117.39C925,-107.16 926.24,-94.05 930,-83 931.68,-78.07 934.04,-73.12 936.67,-68.44" />
<polygon fill="black" stroke="black"
points="939.58,-70.39 941.88,-60.05 933.63,-66.7 939.58,-70.39" />
</g>
<!-- redis -->
<g id="node6" class="node">
<title>redis</title>
<polygon fill="none" stroke="black"
points="1059,-58.5 1005,-58.5 1005,-22.5 1059,-22.5 1059,-58.5" />
<text text-anchor="middle" x="1032" y="-35.45" font-family="Times,serif"
font-size="14.00">Redis</text>
</g>
<!-- web_app&#45;&gt;redis -->
<g id="edge6" class="edge">
<title>web_app&#45;&gt;redis</title>
<path fill="none" stroke="black"
d="M931.67,-117.34C936.69,-106.32 944.5,-92.37 955,-83 969.85,-69.75 978.5,-74.47 996,-65 996.21,-64.88 996.43,-64.77 996.64,-64.65" />
<polygon fill="black" stroke="black"
points="998.19,-67.8 1005.05,-59.73 994.65,-61.76 998.19,-67.8" />
</g>
<!-- listen_master_web -->
<g id="node11" class="node">
<title>listen_master_web</title>
<polygon fill="none" stroke="black" points="862,-65 816.62,-28.25 907.38,-28.25 862,-65" />
<text text-anchor="middle" x="862" y="-35.45" font-family="Times,serif"
font-size="14.00">:80</text>
</g>
<!-- web_app&#45;&gt;listen_master_web -->
<g id="edge10" class="edge">
<title>web_app&#45;&gt;listen_master_web</title>
<path fill="none" stroke="black"
d="M904.09,-117.23C898.48,-111.93 892.74,-105.82 888.25,-99.5 882.02,-90.73 876.73,-80.24 872.57,-70.61" />
<polygon fill="black" stroke="black"
points="875.93,-69.59 868.93,-61.64 869.45,-72.22 875.93,-69.59" />
<text text-anchor="middle" x="905.12" y="-86.2" font-family="Times,serif"
font-size="14.00">Listen</text>
</g>
<!-- replikey -->
<g id="node7" class="node">
<title>replikey</title>
<polygon fill="none" stroke="black"
points="1080.38,-153.5 1015.62,-153.5 1015.62,-117.5 1080.38,-117.5 1080.38,-153.5" />
<text text-anchor="middle" x="1048" y="-130.45" font-family="Times,serif"
font-size="14.00">Replikey</text>
</g>
<!-- replikey&#45;&gt;db -->
<g id="edge2" class="edge">
<title>replikey&#45;&gt;db</title>
<path fill="none" stroke="orange"
d="M1015.48,-128.13C998.56,-123.14 978.97,-114.45 967,-99.5 960.42,-91.28 957.35,-80.31 956.03,-70.12" />
<polygon fill="orange" stroke="orange"
points="959.53,-69.96 955.3,-60.25 952.55,-70.48 959.53,-69.96" />
<text text-anchor="middle" x="1001.5" y="-86.2" font-family="Times,serif"
font-size="14.00">SNI Routing</text>
</g>
<!-- replikey&#45;&gt;redis -->
<g id="edge3" class="edge">
<title>replikey&#45;&gt;redis</title>
<path fill="none" stroke="orange"
d="M1044.99,-117.01C1042.71,-103.76 1039.53,-85.28 1036.89,-69.92" />
<polygon fill="orange" stroke="orange"
points="1040.4,-69.67 1035.25,-60.41 1033.5,-70.85 1040.4,-69.67" />
<text text-anchor="middle" x="1076.4" y="-86.2" font-family="Times,serif"
font-size="14.00">SNI Routing</text>
</g>
<!-- listen_master_replikey -->
<g id="node12" class="node">
<title>listen_master_replikey</title>
<polygon fill="none" stroke="black"
points="1141,-65 1077.19,-28.25 1204.81,-28.25 1141,-65" />
<text text-anchor="middle" x="1141" y="-35.45" font-family="Times,serif"
font-size="14.00">:6443</text>
</g>
<!-- replikey&#45;&gt;listen_master_replikey -->
<g id="edge9" class="edge">
<title>replikey&#45;&gt;listen_master_replikey</title>
<path fill="none" stroke="black"
d="M1090.83,-117.59C1099.6,-112.71 1108.21,-106.71 1115,-99.5 1124.83,-89.06 1131.39,-74.26 1135.45,-62.23" />
<polygon fill="black" stroke="black"
points="1089.25,-114.47 1081.92,-122.12 1092.42,-120.71 1089.25,-114.47" />
<text text-anchor="middle" x="1142.87" y="-86.2" font-family="Times,serif"
font-size="14.00">Listen</text>
</g>
<!-- server_cert -->
<g id="node8" class="node">
<title>server_cert</title>
<polygon fill="none" stroke="black"
points="1033.62,-254.5 964.38,-254.5 964.38,-218.5 1039.62,-218.5 1039.62,-248.5 1033.62,-254.5" />
<polyline fill="none" stroke="black" points="1033.62,-254.5 1033.62,-248.5" />
<polyline fill="none" stroke="black" points="1039.62,-248.5 1033.62,-248.5" />
<text text-anchor="middle" x="1002" y="-231.45" font-family="Times,serif"
font-size="14.00">Server cert</text>
</g>
<!-- server_cert&#45;&gt;replikey -->
<g id="edge7" class="edge">
<title>server_cert&#45;&gt;replikey</title>
<path fill="none" stroke="black"
d="M998.38,-218.25C996.45,-204.79 995.76,-186.02 1003,-171.5 1004.78,-167.94 1007.11,-164.63 1009.77,-161.59" />
<polygon fill="black" stroke="black"
points="1012.1,-164.21 1016.89,-154.76 1007.25,-159.15 1012.1,-164.21" />
<text text-anchor="middle" x="1037.5" y="-174.7" font-family="Times,serif"
font-size="14.00">Authenticate</text>
</g>
<!-- server_key -->
<g id="node9" class="node">
<title>server_key</title>
<polygon fill="none" stroke="black"
points="1033.25,-349.5 964.75,-349.5 964.75,-313.5 1039.25,-313.5 1039.25,-343.5 1033.25,-349.5" />
<polyline fill="none" stroke="black" points="1033.25,-349.5 1033.25,-343.5" />
<polyline fill="none" stroke="black" points="1039.25,-343.5 1033.25,-343.5" />
<text text-anchor="middle" x="1002" y="-326.45" font-family="Times,serif"
font-size="14.00">Server key</text>
</g>
<!-- server_key&#45;&gt;server_cert -->
<g id="edge4" class="edge">
<title>server_key&#45;&gt;server_cert</title>
<path fill="none" stroke="black"
d="M1002,-313.01C1002,-299.89 1002,-281.64 1002,-266.37" />
<polygon fill="black" stroke="black"
points="1005.5,-266.43 1002,-256.43 998.5,-266.43 1005.5,-266.43" />
<text text-anchor="middle" x="1032.75" y="-282.2" font-family="Times,serif"
font-size="14.00">Private key</text>
</g>
<!-- ca_cert -->
<g id="node10" class="node">
<title>ca_cert</title>
<polygon fill="none" stroke="black"
points="1117,-254.5 1065,-254.5 1065,-218.5 1123,-218.5 1123,-248.5 1117,-254.5" />
<polyline fill="none" stroke="black" points="1117,-254.5 1117,-248.5" />
<polyline fill="none" stroke="black" points="1123,-248.5 1117,-248.5" />
<text text-anchor="middle" x="1094" y="-231.45" font-family="Times,serif"
font-size="14.00">CA cert</text>
</g>
<!-- ca_cert&#45;&gt;replikey -->
<g id="edge8" class="edge">
<title>ca_cert&#45;&gt;replikey</title>
<path fill="none" stroke="black"
d="M1089.42,-218.15C1085.61,-204.97 1079.6,-186.61 1072,-171.5 1070.62,-168.76 1069.05,-165.98 1067.4,-163.25" />
<polygon fill="black" stroke="black"
points="1070.41,-161.46 1062.01,-155 1064.55,-165.29 1070.41,-161.46" />
<text text-anchor="middle" x="1093.04" y="-174.7" font-family="Times,serif"
font-size="14.00">Trust</text>
</g>
<!-- db_slave -->
<g id="node13" class="node">
<title>db_slave</title>
<polygon fill="none" stroke="black"
points="413.88,-254.5 352.12,-254.5 352.12,-218.5 413.88,-218.5 413.88,-254.5" />
<text text-anchor="middle" x="383" y="-231.45" font-family="Times,serif"
font-size="14.00">Postgres</text>
</g>
<!-- replikey_slave_db -->
<g id="node15" class="node">
<title>replikey_slave_db</title>
<polygon fill="none" stroke="black"
points="511,-153.5 387,-153.5 387,-117.5 511,-117.5 511,-153.5" />
<text text-anchor="middle" x="449" y="-130.45" font-family="Times,serif"
font-size="14.00">Replikey DB Client</text>
</g>
<!-- db_slave&#45;&gt;replikey_slave_db -->
<g id="edge11" class="edge">
<title>db_slave&#45;&gt;replikey_slave_db</title>
<path fill="none" stroke="orange"
d="M379.66,-218.12C377.98,-204.42 377.85,-185.38 386.5,-171.5 389.02,-167.46 392.18,-163.85 395.73,-160.63" />
<polygon fill="orange" stroke="orange"
points="397.63,-163.58 403.43,-154.72 393.36,-158.03 397.63,-163.58" />
<text text-anchor="middle" x="414.25" y="-174.7" font-family="Times,serif"
font-size="14.00">Plain TCP</text>
</g>
<!-- redis_slave -->
<g id="node14" class="node">
<title>redis_slave</title>
<polygon fill="none" stroke="black"
points="764,-254.5 710,-254.5 710,-218.5 764,-218.5 764,-254.5" />
<text text-anchor="middle" x="737" y="-231.45" font-family="Times,serif"
font-size="14.00">Redis</text>
</g>
<!-- replikey_slave_redis -->
<g id="node16" class="node">
<title>replikey_slave_redis</title>
<polygon fill="none" stroke="black"
points="753,-153.5 617,-153.5 617,-117.5 753,-117.5 753,-153.5" />
<text text-anchor="middle" x="685" y="-130.45" font-family="Times,serif"
font-size="14.00">Replikey Redis Client</text>
</g>
<!-- redis_slave&#45;&gt;replikey_slave_redis -->
<g id="edge12" class="edge">
<title>redis_slave&#45;&gt;replikey_slave_redis</title>
<path fill="none" stroke="orange"
d="M727.94,-218.26C720.09,-203.3 708.58,-181.39 699.45,-164.01" />
<polygon fill="orange" stroke="orange"
points="702.58,-162.44 694.83,-155.22 696.38,-165.7 702.58,-162.44" />
<text text-anchor="middle" x="738.75" y="-174.7" font-family="Times,serif"
font-size="14.00">Plain TCP</text>
</g>
<!-- replikey_slave_db&#45;&gt;listen_master_replikey -->
<g id="edge18" class="edge">
<title>replikey_slave_db&#45;&gt;listen_master_replikey</title>
<path fill="none" stroke="green"
d="M510.56,-117.01C554.75,-105.24 615.93,-90.54 670.75,-83 845.84,-58.91 894.07,-96.36 1068,-65 1080.51,-62.74 1093.85,-58.9 1105.65,-54.97" />
<polygon fill="green" stroke="green"
points="1106.69,-58.31 1114.99,-51.72 1104.39,-51.7 1106.69,-58.31" />
<text text-anchor="middle" x="709.38" y="-86.2" font-family="Times,serif"
font-size="14.00">TLS with SNI</text>
</g>
<!-- replikey_slave_redis&#45;&gt;listen_master_replikey -->
<g id="edge19" class="edge">
<title>replikey_slave_redis&#45;&gt;listen_master_replikey</title>
<path fill="none" stroke="green"
d="M714.72,-117.15C736.62,-105.29 767.56,-90.42 796.75,-83 913.84,-53.22 949.33,-87.68 1068,-65 1080.39,-62.63 1093.6,-58.8 1105.34,-54.91" />
<polygon fill="green" stroke="green"
points="1106.32,-58.27 1114.63,-51.7 1104.03,-51.66 1106.32,-58.27" />
<text text-anchor="middle" x="835.38" y="-86.2" font-family="Times,serif"
font-size="14.00">TLS with SNI</text>
</g>
<!-- client_cert -->
<g id="node17" class="node">
<title>client_cert</title>
<polygon fill="none" stroke="black"
points="534.5,-254.5 467.5,-254.5 467.5,-218.5 540.5,-218.5 540.5,-248.5 534.5,-254.5" />
<polyline fill="none" stroke="black" points="534.5,-254.5 534.5,-248.5" />
<polyline fill="none" stroke="black" points="540.5,-248.5 534.5,-248.5" />
<text text-anchor="middle" x="504" y="-231.45" font-family="Times,serif"
font-size="14.00">Client cert</text>
</g>
<!-- client_cert&#45;&gt;replikey_slave_db -->
<g id="edge14" class="edge">
<title>client_cert&#45;&gt;replikey_slave_db</title>
<path fill="none" stroke="black"
d="M475.84,-218.05C465.79,-210.21 455.57,-200.01 450,-188 446.79,-181.08 445.61,-173.05 445.46,-165.42" />
<polygon fill="black" stroke="black"
points="448.95,-165.63 445.87,-155.49 441.96,-165.34 448.95,-165.63" />
<text text-anchor="middle" x="484.5" y="-174.7" font-family="Times,serif"
font-size="14.00">Authenticate</text>
</g>
<!-- client_cert&#45;&gt;replikey_slave_redis -->
<g id="edge16" class="edge">
<title>client_cert&#45;&gt;replikey_slave_redis</title>
<path fill="none" stroke="black"
d="M509.9,-218.18C515.78,-203.56 526.22,-183.19 542,-171.5 560.36,-157.9 583.34,-149.53 605.46,-144.4" />
<polygon fill="black" stroke="black"
points="606.03,-147.85 615.1,-142.37 604.59,-141 606.03,-147.85" />
<text text-anchor="middle" x="576.5" y="-174.7" font-family="Times,serif"
font-size="14.00">Authenticate</text>
</g>
<!-- client_key -->
<g id="node18" class="node">
<title>client_key</title>
<polygon fill="none" stroke="black"
points="534.12,-349.5 467.88,-349.5 467.88,-313.5 540.12,-313.5 540.12,-343.5 534.12,-349.5" />
<polyline fill="none" stroke="black" points="534.12,-349.5 534.12,-343.5" />
<polyline fill="none" stroke="black" points="540.12,-343.5 534.12,-343.5" />
<text text-anchor="middle" x="504" y="-326.45" font-family="Times,serif"
font-size="14.00">Client key</text>
</g>
<!-- client_key&#45;&gt;client_cert -->
<g id="edge13" class="edge">
<title>client_key&#45;&gt;client_cert</title>
<path fill="none" stroke="black" d="M504,-313.01C504,-299.89 504,-281.64 504,-266.37" />
<polygon fill="black" stroke="black"
points="507.5,-266.43 504,-256.43 500.5,-266.43 507.5,-266.43" />
<text text-anchor="middle" x="534.75" y="-282.2" font-family="Times,serif"
font-size="14.00">Private key</text>
</g>
<!-- ca_cert_slave -->
<g id="node19" class="node">
<title>ca_cert_slave</title>
<polygon fill="none" stroke="black"
points="671,-254.5 619,-254.5 619,-218.5 677,-218.5 677,-248.5 671,-254.5" />
<polyline fill="none" stroke="black" points="671,-254.5 671,-248.5" />
<polyline fill="none" stroke="black" points="677,-248.5 671,-248.5" />
<text text-anchor="middle" x="648" y="-231.45" font-family="Times,serif"
font-size="14.00">CA cert</text>
</g>
<!-- ca_cert_slave&#45;&gt;replikey_slave_db -->
<g id="edge15" class="edge">
<title>ca_cert_slave&#45;&gt;replikey_slave_db</title>
<path fill="none" stroke="black"
d="M642.45,-218.08C636.85,-203.4 626.77,-182.99 611,-171.5 585.53,-152.94 552.21,-143.86 522.41,-139.54" />
<polygon fill="black" stroke="black"
points="523.05,-136.09 512.69,-138.3 522.17,-143.03 523.05,-136.09" />
<text text-anchor="middle" x="640.5" y="-174.7" font-family="Times,serif"
font-size="14.00">Trust</text>
</g>
<!-- ca_cert_slave&#45;&gt;replikey_slave_redis -->
<g id="edge17" class="edge">
<title>ca_cert_slave&#45;&gt;replikey_slave_redis</title>
<path fill="none" stroke="black"
d="M654.44,-218.26C659.98,-203.44 668.07,-181.8 674.53,-164.5" />
<polygon fill="black" stroke="black"
points="677.75,-165.89 677.97,-155.3 671.2,-163.44 677.75,-165.89" />
<text text-anchor="middle" x="685.15" y="-174.7" font-family="Times,serif"
font-size="14.00">Trust</text>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 21 KiB

200
src/bin/replikey.rs Normal file
View file

@ -0,0 +1,200 @@
use clap::Parser;
#[cfg(feature = "setup-postgres")]
use replikey::ops::postgres::{
add_table_to_postgres_pub, drop_postgres_pub, drop_postgres_sub, drop_table_from_postgres_pub,
setup_postgres_pub, setup_postgres_sub, SetupPostgresMasterCommand,
SetupPostgresMasterSubCommand, SetupPostgresSlaveCommand, SetupPostgresSlaveSubCommand,
};
#[cfg(feature = "keygen")]
use replikey::{
cert::UsageType,
ops::service::{
service_replicate_master, service_replicate_slave, ServiceCommand, ServiceSubCommand,
},
};
#[cfg(feature = "keygen")]
use replikey::ops::cert::*;
#[cfg(feature = "networking")]
use replikey::ops::network::*;
use rustls::crypto::{aws_lc_rs, CryptoProvider};
// Top-level CLI options; clap dispatches to exactly one subcommand.
// (Plain `//` comments are used deliberately: `///` doc comments on clap
// derive items become user-visible help text.)
#[derive(Debug, Parser)]
#[clap(name = "replikey")]
#[clap(version = env!("CARGO_PKG_VERSION"))]
struct Opts {
    #[clap(subcommand)]
    subcmd: SubCommand,
}
// NOTE(review): this type is never referenced in this file — presumably a
// placeholder for subcommands compiled out by feature flags; confirm intent
// before removing.
#[derive(Debug, Parser)]
#[clap(name = "not-available")]
struct NotAvailable;
// All CLI subcommands; each is compiled in only when its cargo feature is
// enabled, so a minimal build exposes only `info`.
#[derive(Debug, Parser)]
enum SubCommand {
    // Certificate/key management: create CA, sign CSRs, generate CRLs.
    #[cfg(feature = "keygen")]
    #[clap(name = "cert")]
    Cert(CertCommand),
    // TLS forward/reverse proxying between master and slaves.
    #[cfg(feature = "networking")]
    #[clap(name = "network")]
    Network(NetworkCommand),
    // Long-running replication services driven by a config file.
    #[clap(name = "service")]
    #[cfg(feature = "service")]
    Service(ServiceCommand),
    // Manage Postgres publications on the master side.
    #[cfg(feature = "setup-postgres")]
    #[clap(name = "setup-postgres-master")]
    SetupPostgresMaster(SetupPostgresMasterCommand),
    // Manage Postgres subscriptions on the slave side.
    #[cfg(feature = "setup-postgres")]
    #[clap(name = "setup-postgres-slave")]
    SetupPostgresSlave(SetupPostgresSlaveCommand),
    // Print version and compiled-in feature flags.
    Info,
}
/// Build a single-threaded tokio runtime with all drivers (I/O, time) enabled.
#[cfg(feature = "asyncio")]
fn start_runtime() -> tokio::runtime::Runtime {
    tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap()
}
/// CLI entry point.
///
/// Sets up logging, installs the process-wide rustls crypto provider, parses
/// the command line, and dispatches to the selected (feature-gated)
/// subcommand implementation.
fn main() {
    // Default to info-level logging unless the user already set RUST_LOG.
    if std::env::var("RUST_LOG").is_err() {
        std::env::set_var("RUST_LOG", "info");
    }
    env_logger::init();

    let opts: Opts = Opts::parse();

    // rustls needs exactly one process-wide crypto provider installed before
    // any TLS configuration is built; fail fast if that cannot happen.
    CryptoProvider::install_default(aws_lc_rs::default_provider())
        .expect("Failed to install crypto provider");

    match opts.subcmd {
        SubCommand::Info => {
            println!("replikey v{}", env!("CARGO_PKG_VERSION"));
            println!("Feature flags:");
            // Report YES/NO for each compile-time feature flag.
            macro_rules! print_feature {
                ($feature:literal) => {
                    println!(
                        " {}: {}",
                        $feature,
                        if cfg!(feature = $feature) {
                            "YES"
                        } else {
                            "NO"
                        }
                    );
                };
            }
            print_feature!("serde");
            print_feature!("keygen");
            print_feature!("asyncio");
            print_feature!("networking");
            print_feature!("service");
            print_feature!("remote-crl");
            print_feature!("setup-postgres");
        }
        #[cfg(feature = "keygen")]
        SubCommand::Cert(cert) => match cert.subcmd {
            CertSubCommand::CreateCa(ca) => {
                create_ca(ca, true);
            }
            CertSubCommand::CreateServer(server) => {
                create_server(server);
            }
            CertSubCommand::CreateClient(client) => {
                create_client(client);
            }
            CertSubCommand::SignServerCSR(opts) => {
                sign_csr(opts, UsageType::Server, true);
            }
            CertSubCommand::SignClientCSR(opts) => {
                sign_csr(opts, UsageType::Client, true);
            }
            CertSubCommand::GenerateCrl(opts) => {
                revoke_cert(opts);
            }
        },
        // BUGFIX: this arm must be gated on the same feature as the
        // `SubCommand::Network` variant (`networking`, not `service`).
        // Previously, enabling exactly one of the two features failed to
        // compile: either a non-exhaustive match (networking without service)
        // or a reference to a nonexistent variant (service without networking).
        // NOTE(review): start_runtime is gated on `asyncio`, so a `networking`
        // build without `asyncio` still won't compile — consider expressing
        // that dependency in Cargo.toml's feature graph.
        #[cfg(feature = "networking")]
        SubCommand::Network(network) => match network.subcmd {
            NetworkSubCommand::ReverseProxy(opts) => {
                println!("Reverse proxy: {:?}", opts);
                let rt = start_runtime();
                rt.block_on(reverse_proxy(opts))
                    .expect("Failed to run reverse proxy");
            }
            NetworkSubCommand::ForwardProxy(opts) => {
                println!("Forward proxy: {:?}", opts);
                let rt = start_runtime();
                rt.block_on(forward_proxy(opts))
                    .expect("Failed to run forward proxy");
            }
        },
        #[cfg(feature = "service")]
        SubCommand::Service(service) => match service.subcmd {
            ServiceSubCommand::ReplicateMaster { config } => {
                service_replicate_master(config);
            }
            ServiceSubCommand::ReplicateSlave { config } => {
                service_replicate_slave(config);
            }
        },
        #[cfg(feature = "setup-postgres")]
        SubCommand::SetupPostgresMaster(opts) => {
            let conn = opts.connection_string.as_deref();
            let rt = start_runtime();
            rt.block_on(async {
                match opts.subcmd {
                    SetupPostgresMasterSubCommand::Setup(opts) => {
                        setup_postgres_pub(conn, opts)
                            .await
                            .expect("Failed to setup publication");
                    }
                    SetupPostgresMasterSubCommand::Drop(opts) => {
                        drop_postgres_pub(conn, opts)
                            .await
                            .expect("Failed to drop publication");
                    }
                    SetupPostgresMasterSubCommand::AddTable(opts) => {
                        add_table_to_postgres_pub(conn, opts)
                            .await
                            .expect("Failed to add table to publication");
                    }
                    SetupPostgresMasterSubCommand::DropTable(opts) => {
                        drop_table_from_postgres_pub(conn, opts)
                            .await
                            .expect("Failed to drop table from publication");
                    }
                }
            });
        }
        #[cfg(feature = "setup-postgres")]
        SubCommand::SetupPostgresSlave(opts) => {
            let conn = opts.connection_string.as_deref();
            let rt = start_runtime();
            rt.block_on(async {
                match opts.subcmd {
                    SetupPostgresSlaveSubCommand::Setup(opts) => {
                        setup_postgres_sub(conn, opts)
                            .await
                            .expect("Failed to setup subscription");
                    }
                    SetupPostgresSlaveSubCommand::Drop(opts) => {
                        drop_postgres_sub(conn, opts)
                            .await
                            .expect("Failed to drop subscription");
                    }
                }
            });
        }
    }
}

161
src/cert.rs Normal file
View file

@ -0,0 +1,161 @@
use rand_core::{OsRng, RngCore};
use rcgen::{
BasicConstraints, CertificateParams, CrlDistributionPoint, DistinguishedName,
ExtendedKeyUsagePurpose, Ia5String, IsCa, KeyUsagePurpose, SanType, SerialNumber,
};
use time::OffsetDateTime;
/// Build default [`CertificateParams`] for a CA certificate.
///
/// Sets a random serial, validity from now until `not_after`, the supplied
/// distinguished name, certificate/CRL signing key usages, and unconstrained
/// CA basic constraints.
pub fn default_ca_options(not_after: OffsetDateTime, dn: DistinguishedName) -> CertificateParams {
    let mut start = CertificateParams::default();

    let mut serial = [0u8; 20];
    OsRng.fill_bytes(&mut serial);
    // RFC 5280 §4.1.2.2: serial numbers must be positive and at most 20
    // octets once DER-encoded. Clearing the top bit keeps the random value
    // positive without the encoder having to prepend a 0x00 sign octet
    // (which would produce a non-conformant 21-octet INTEGER).
    serial[0] &= 0x7f;
    start.serial_number = Some(SerialNumber::from_slice(&serial));

    start.not_before = OffsetDateTime::now_utc();
    start.not_after = not_after;
    start.distinguished_name = dn;
    start.key_usages = vec![
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::CrlSign,
        KeyUsagePurpose::KeyCertSign,
    ];
    start.extended_key_usages = vec![
        ExtendedKeyUsagePurpose::ServerAuth,
        ExtendedKeyUsagePurpose::ClientAuth,
    ];
    start.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
    start
}
/// Build default [`CertificateParams`] for a server certificate (or CSR).
///
/// Subject alternative names are populated from `dns_names` and `ip_addrs`.
/// With `csr == true`, serial, key usages, and CA status are omitted — those
/// are filled in by the signing CA (see `csr_apply_server`).
///
/// # Panics
/// Panics on an invalid DNS name or unparsable IP address.
pub fn default_server_cert_options(
    not_after: OffsetDateTime,
    dns_names: &[&str],
    ip_addrs: &[&str],
    dn: DistinguishedName,
    csr: bool, // remove fields that are not needed for csr
) -> CertificateParams {
    let mut start = CertificateParams::default();
    start.distinguished_name = dn;
    start.not_before = OffsetDateTime::now_utc();
    start.not_after = not_after;
    start.subject_alt_names = dns_names
        .iter()
        .map(|n| SanType::DnsName(Ia5String::try_from(*n).expect("Invalid DNS name")))
        .chain(
            ip_addrs
                .iter()
                .map(|n| SanType::IpAddress(n.parse().unwrap())),
        )
        .collect();
    if !csr {
        // Serial only belongs on a signed certificate, not a CSR.
        let mut serial = [0u8; 20];
        OsRng.fill_bytes(&mut serial);
        // RFC 5280 §4.1.2.2: keep the DER-encoded serial positive and at most
        // 20 octets by clearing the sign bit.
        serial[0] &= 0x7f;
        start.serial_number = Some(SerialNumber::from_slice(&serial));
        start.key_usages = vec![
            KeyUsagePurpose::DigitalSignature,
            KeyUsagePurpose::DataEncipherment,
        ];
        start.extended_key_usages = vec![ExtendedKeyUsagePurpose::ServerAuth];
        start.is_ca = IsCa::ExplicitNoCa;
    }
    start
}
/// Which TLS role a certificate being signed will authenticate.
#[derive(Debug, Clone, Copy)]
pub enum UsageType {
    /// TLS server certificate (extended key usage: ServerAuth).
    Server,
    /// TLS client certificate (extended key usage: ClientAuth).
    Client,
}
/// Stamp server-certificate fields onto CSR-derived params before signing.
///
/// Issues a fresh random serial, starts validity now, clamps the lifetime to
/// the shorter of `not_after` and what the CSR requested, forces end-entity
/// (non-CA) status with TLS-server usages, and replaces the SANs with the
/// CA-approved `dns_names`/`ip_addrs`.
///
/// # Panics
/// Panics on an invalid DNS name or unparsable IP address.
pub fn csr_apply_server(
    csr: &mut CertificateParams,
    not_after: OffsetDateTime,
    dns_names: &[&str],
    ip_addrs: &[&str],
) {
    let mut serial = [0u8; 20];
    OsRng.fill_bytes(&mut serial);
    // RFC 5280 §4.1.2.2: serials must be positive and at most 20 octets when
    // DER-encoded; clearing the top bit avoids a 21-octet encoding.
    serial[0] &= 0x7f;
    csr.serial_number = Some(SerialNumber::from_slice(&serial));
    csr.not_before = OffsetDateTime::now_utc();
    // Never extend the certificate beyond the CSR's own requested expiry.
    csr.not_after = not_after.min(csr.not_after);
    csr.key_usages = vec![
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::DataEncipherment,
    ];
    csr.extended_key_usages = vec![ExtendedKeyUsagePurpose::ServerAuth];
    csr.is_ca = IsCa::ExplicitNoCa;
    csr.subject_alt_names = dns_names
        .iter()
        .map(|n| SanType::DnsName(Ia5String::try_from(*n).expect("Invalid DNS name")))
        .chain(
            ip_addrs
                .iter()
                .map(|n| SanType::IpAddress(n.parse().unwrap())),
        )
        .collect();
}
/// Stamp client-certificate fields onto CSR-derived params before signing.
///
/// Same policy as [`csr_apply_server`], but with the ClientAuth extended key
/// usage instead of ServerAuth.
///
/// # Panics
/// Panics on an invalid DNS name or unparsable IP address.
pub fn csr_apply_client(
    csr: &mut CertificateParams,
    not_after: OffsetDateTime,
    dns_names: &[&str],
    ip_addrs: &[&str],
) {
    let mut serial = [0u8; 20];
    OsRng.fill_bytes(&mut serial);
    // RFC 5280 §4.1.2.2: serials must be positive and at most 20 octets when
    // DER-encoded; clearing the top bit avoids a 21-octet encoding.
    serial[0] &= 0x7f;
    csr.serial_number = Some(SerialNumber::from_slice(&serial));
    csr.not_before = OffsetDateTime::now_utc();
    // Never extend the certificate beyond the CSR's own requested expiry.
    csr.not_after = not_after.min(csr.not_after);
    csr.key_usages = vec![
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::DataEncipherment,
    ];
    csr.extended_key_usages = vec![ExtendedKeyUsagePurpose::ClientAuth];
    csr.is_ca = IsCa::ExplicitNoCa;
    csr.subject_alt_names = dns_names
        .iter()
        .map(|n| SanType::DnsName(Ia5String::try_from(*n).expect("Invalid DNS name")))
        .chain(
            ip_addrs
                .iter()
                .map(|n| SanType::IpAddress(n.parse().unwrap())),
        )
        .collect();
}
/// Build default [`CertificateParams`] for a client certificate (or CSR).
///
/// With `csr == true`, only subject and validity are set; serial, usages, CA
/// status, and CRL distribution points are left for the signing CA — note the
/// supplied `crls` are intentionally dropped in that case.
pub fn default_client_cert_options(
    not_after: OffsetDateTime,
    dn: DistinguishedName,
    crls: Vec<CrlDistributionPoint>,
    csr: bool,
) -> CertificateParams {
    let mut start = CertificateParams::default();
    start.distinguished_name = dn;
    start.not_before = OffsetDateTime::now_utc();
    start.not_after = not_after;
    if !csr {
        // Serial only belongs on a signed certificate, not a CSR.
        let mut serial = [0u8; 20];
        OsRng.fill_bytes(&mut serial);
        // RFC 5280 §4.1.2.2: keep the DER-encoded serial positive and at most
        // 20 octets by clearing the sign bit.
        serial[0] &= 0x7f;
        start.serial_number = Some(SerialNumber::from_slice(&serial));
        start.key_usages = vec![
            KeyUsagePurpose::DigitalSignature,
            KeyUsagePurpose::DataEncipherment,
        ];
        start.extended_key_usages = vec![ExtendedKeyUsagePurpose::ClientAuth];
        start.is_ca = IsCa::ExplicitNoCa;
        start.crl_distribution_points = crls;
    }
    start
}

221
src/fs_crypt.rs Normal file
View file

@ -0,0 +1,221 @@
use std::{
fs::OpenOptions,
io::{Read, Seek, Write},
os::unix::fs::OpenOptionsExt,
path::Path,
};
use aes_gcm::{
aead::{Aead, AeadMutInPlace},
AeadCore, Aes256Gcm, KeyInit,
};
use argon2::Argon2;
use rand_core::{OsRng, RngCore};
use rpassword::prompt_password;
const KEY_SIZE: usize = 32;
const SALT_SIZE: usize = 16;
const NONCE_SIZE: usize = 12;
/// Preset Argon2id cost levels for the password-based KDF.
///
/// The same level must be used for encryption and decryption, since it
/// changes the derived key.
#[derive(Debug, Clone, Copy)]
pub enum Argon2Hardness {
    /// Minimal parameters for fast unit tests only — not secure.
    Test,
    /// The argon2 crate's default parameters.
    Default,
    /// High-cost parameters (see NOTE on the m_cost value in `derive_password`).
    Hard,
}
/// Write `data` to `path`, creating (or truncating) the file with Unix mode
/// 0o600 so only the owner can read or write it — suitable for key material.
pub fn private_fs_write<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, data: C) -> std::io::Result<()> {
    OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .mode(0o600)
        .open(path)?
        .write_all(data.as_ref())
}
/// Derive a 32-byte AES key from `password` and `salt` using Argon2id (v1.3).
///
/// `hardness` selects the cost parameters; the same value must be used when
/// encrypting and decrypting or the derived keys will not match.
pub fn derive_password(password: &str, salt: &[u8], hardness: Argon2Hardness) -> [u8; KEY_SIZE] {
    let mut key = [0u8; KEY_SIZE];
    Argon2::new(
        argon2::Algorithm::Argon2id,
        argon2::Version::V0x13,
        match hardness {
            // Deliberately weak parameters so unit tests run fast — never use
            // for real data.
            Argon2Hardness::Test => argon2::ParamsBuilder::default()
                .t_cost(1)
                .m_cost(16)
                .build()
                .unwrap(),
            Argon2Hardness::Default => argon2::ParamsBuilder::default().build().unwrap(),
            // NOTE(review): the argon2 crate's m_cost is measured in 1 KiB
            // blocks, so 128 << 20 requests 128 GiB of memory — presumably
            // 128 MiB (128 << 10) was intended. Not changed here because it
            // would alter derived keys; confirm before anyone relies on Hard.
            Argon2Hardness::Hard => argon2::ParamsBuilder::default()
                .t_cost(30)
                .m_cost(128 << 20)
                .build()
                .unwrap(),
        },
    )
    .hash_password_into(password.as_bytes(), salt, &mut key)
    .unwrap();
    key
}
/// Append PKCS#7 padding so `output.len()` becomes a multiple of `block_size`.
///
/// Always adds at least one byte; an already-aligned buffer gains a full
/// block, so the padding is unambiguously removable.
fn pkcs7_pad(output: &mut Vec<u8>, block_size: usize) {
    let pad_len = block_size - output.len() % block_size;
    output.resize(output.len() + pad_len, pad_len as u8);
}
/// Encrypt `data` in place with AES-256-GCM keyed from `password` via Argon2id.
///
/// On return, `data` holds: `[version = 1][salt; 16][nonce; 12][ciphertext || tag]`.
/// The plaintext is PKCS#7-padded to a 16-byte multiple first so `decrypt`
/// can sanity-check the ciphertext length.
pub fn encrypt(data: &mut Vec<u8>, password: &str, hardness: Argon2Hardness) {
    let mut salt = [0u8; SALT_SIZE];
    OsRng
        .try_fill_bytes(&mut salt)
        .expect("Failed to generate salt");
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
    let key = derive_password(password, &salt, hardness);
    let mut header = vec![1]; // in case we need to add a version byte
    header.extend_from_slice(&salt);
    header.extend_from_slice(&nonce);
    // `mut` is required: AeadMutInPlace::encrypt_in_place takes &mut self.
    let mut cipher = Aes256Gcm::new_from_slice(&key).unwrap();
    pkcs7_pad(data, 16);
    cipher
        .encrypt_in_place(&nonce, &[], data)
        .expect("Encryption failed");
    // Prepend the header so the buffer is self-describing for decrypt().
    data.splice(0..0, header);
}
/// Decrypt a buffer produced by [`encrypt`] and strip the PKCS#7 padding.
///
/// Expects the layout `[version = 1][salt; 16][nonce; 12][ciphertext || tag]`.
///
/// # Panics
/// Panics if the buffer is truncated, the version byte is unknown, the
/// ciphertext length is not a multiple of the block size, or GCM
/// authentication fails (wrong password or tampered data).
pub fn decrypt(data: &[u8], password: &str, hardness: Argon2Hardness) -> Vec<u8> {
    const HEADER_SIZE: usize = 1 + SALT_SIZE + NONCE_SIZE;
    // Validate the length before any indexing/arithmetic: previously a
    // truncated buffer panicked with an opaque out-of-bounds or
    // subtract-with-overflow message instead of a meaningful one.
    if data.len() < HEADER_SIZE {
        panic!("Invalid data length");
    }
    if data[0] != 1 {
        panic!("Unsupported version");
    }
    let salt = &data[1..SALT_SIZE + 1];
    let nonce = &data[SALT_SIZE + 1..HEADER_SIZE];
    let key = derive_password(password, salt, hardness);
    let cipher = Aes256Gcm::new_from_slice(&key).unwrap();
    // encrypt() pads the plaintext to 16 bytes and GCM appends a 16-byte tag,
    // so the trailing section must be a multiple of 16.
    if (data.len() - HEADER_SIZE) % 16 != 0 {
        panic!("Invalid data length");
    }
    let mut output = cipher
        .decrypt(nonce.into(), &data[HEADER_SIZE..])
        .expect("Decryption failed");
    // GCM authentication already proved the plaintext is exactly what we
    // padded, so the final byte is a valid PKCS#7 pad length.
    let pad = output[output.len() - 1] as usize;
    output.truncate(output.len() - pad);
    output
}
/// Encrypt `data` with [`encrypt`] and wrap the result in an
/// `ENCRYPTED DATA` PEM block (LF line endings).
pub fn encrypt_to_pem(data: &[u8], password: &str, hardness: Argon2Hardness) -> String {
    let mut buf = Vec::from(data);
    encrypt(&mut buf, password, hardness);
    pem_rfc7468::encode_string("ENCRYPTED DATA", pem_rfc7468::LineEnding::LF, &buf)
        .expect("Failed to encode PEM")
}
pub fn write_encrypted_pem<W: Write>(
mut writer: W,
data: &[u8],
password: &str,
hardness: Argon2Hardness,
) -> std::io::Result<()> {
let mut encrypted = data.to_vec();
encrypt(&mut encrypted, password, hardness);
let pem = pem_rfc7468::encode_string("ENCRYPTED DATA", pem_rfc7468::LineEnding::LF, &encrypted)
.expect("Failed to encode PEM");
writer.write_all(pem.as_bytes())
}
/// Read `reader` and, if it starts with an `ENCRYPTED DATA` PEM header,
/// decrypt it and return `Ok(Some(plaintext))`.
///
/// If the content does not look like an encrypted PEM, the reader is rewound
/// to the start and `Ok(None)` is returned so the caller can parse it as
/// plaintext. When `password` is `None`, the user is prompted interactively.
///
/// # Errors
/// Propagates I/O errors from reading/seeking.
///
/// # Panics
/// Panics on malformed PEM, a failed password prompt, or failed decryption.
pub fn read_maybe_encrypted_pem<R: Read + Seek>(
    mut reader: R,
    password: Option<&str>,
    hardness: Argon2Hardness,
) -> std::io::Result<Option<Vec<u8>>> {
    const SIGNATURE: &[u8] = b"-----BEGIN ENCRYPTED DATA-----";
    let mut signature = [0u8; SIGNATURE.len()];
    match reader.read_exact(&mut signature) {
        // Shorter than the header: cannot be our PEM — rewind and pass through.
        Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
            reader.seek(std::io::SeekFrom::Start(0))?;
            return Ok(None);
        }
        Err(e) => {
            return Err(e);
        }
        Ok(_) => {
            // Header read but it isn't ours — rewind and pass through.
            if signature != SIGNATURE {
                reader.seek(std::io::SeekFrom::Start(0))?;
                return Ok(None);
            }
        }
    }
    // Re-read the whole stream including the header we just consumed.
    reader.seek(std::io::SeekFrom::Start(0))?;
    let mut pem = String::new();
    reader.read_to_string(&mut pem)?;
    let (_, encrypted) = pem_rfc7468::decode_vec(pem.as_bytes()).expect("Failed to decode PEM");
    match password {
        None => {
            let password = prompt_password("Enter password: ").expect("Failed to read password");
            Ok(Some(decrypt(&encrypted, &password, hardness)))
        }
        Some(password) => Ok(Some(decrypt(&encrypted, password, hardness))),
    }
}
#[cfg(test)]
mod tests {
    use std::io::Cursor;

    use super::*;

    // The KDF yields a full-size key and is deterministic for fixed inputs.
    #[test]
    fn test_kdf() {
        let password = "password";
        let salt = [0u8; SALT_SIZE];
        let key = derive_password(password, &salt, Argon2Hardness::Test);
        assert_eq!(key.len(), KEY_SIZE);
        let key2 = derive_password(password, &salt, Argon2Hardness::Test);
        assert_eq!(key, key2);
    }

    // Round-trip encrypt/decrypt for every plaintext length from 0 up to the
    // full buffer, exercising all PKCS#7 padding cases.
    #[test]
    fn test_encrypt_decrypt() {
        let data = (0..64).map(|_| "Hello, world!").collect::<String>();
        for len in 0..data.len() {
            let password = "password";
            let mut encrypted = data.as_bytes()[..len].to_vec();
            encrypt(&mut encrypted, password, Argon2Hardness::Test);
            let decrypted = decrypt(&encrypted, password, Argon2Hardness::Test);
            assert_eq!(&data.as_bytes()[..len], &decrypted[..]);
        }
    }

    // Round-trip through the PEM writer and reader for every plaintext length.
    #[test]
    fn test_pem() {
        let data = (0..64).map(|_| "Hello, world!").collect::<String>();
        for len in 0..data.len() {
            let password = "password";
            let pt = data.as_bytes()[..len].to_vec();
            let mut pem = Vec::new();
            write_encrypted_pem(&mut pem, &pt, password, Argon2Hardness::Test)
                .expect("Failed to write PEM");
            let mut pem = Cursor::new(pem);
            let decrypted =
                read_maybe_encrypted_pem(&mut pem, Some(password), Argon2Hardness::Test)
                    .expect("Failed to read PEM")
                    .expect("Failed to decrypt");
            assert_eq!(&data.as_bytes()[..len], &decrypted[..]);
        }
    }
}

5
src/lib.rs Normal file
View file

@ -0,0 +1,5 @@
#[cfg(feature = "keygen")]
pub mod cert;
pub mod ops;
pub mod fs_crypt;

1020
src/ops/cert.rs Normal file

File diff suppressed because it is too large Load diff

9
src/ops/mod.rs Normal file
View file

@ -0,0 +1,9 @@
// Each operation family is compiled only when its cargo feature is enabled;
// declarations are kept in alphabetical order.

// Certificate create/sign/revoke operations.
#[cfg(feature = "keygen")]
pub mod cert;
// TLS forward/reverse proxy plumbing.
#[cfg(feature = "networking")]
pub mod network;
// Postgres logical-replication publication/subscription setup.
#[cfg(feature = "setup-postgres")]
pub mod postgres;
// Long-running replicate-master / replicate-slave services.
#[cfg(feature = "service")]
pub mod service;

362
src/ops/network.rs Normal file
View file

@ -0,0 +1,362 @@
use std::{io::Cursor, net::ToSocketAddrs, sync::Arc};
use clap::Parser;
use tokio::{
io::{AsyncBufRead, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader},
net::TcpStream,
};
use tokio_rustls::{
rustls::{
client::WebPkiServerVerifier,
pki_types::{pem::PemObject, CertificateDer, PrivateKeyDer, ServerName},
server::WebPkiClientVerifier,
ClientConfig, RootCertStore, ServerConfig,
},
TlsAcceptor, TlsConnector,
};
// Top-level `network` CLI command; dispatches to a proxy subcommand.
// (Plain `//` comments are used here on purpose: `///` doc comments would
// become clap help text and change the CLI's visible output.)
#[derive(Debug, Parser)]
pub struct NetworkCommand {
    #[clap(subcommand)]
    pub subcmd: NetworkSubCommand,
}
// The two proxy roles: reverse (master side, accepts TLS and routes by SNI)
// and forward (slave side, tunnels plaintext out over TLS).
#[derive(Debug, Parser)]
pub enum NetworkSubCommand {
    #[clap(name = "reverse-proxy")]
    ReverseProxy(ReverseProxyCommand),
    #[clap(name = "forward-proxy")]
    ForwardProxy(ForwardProxyCommand),
}
// CLI options for the TLS reverse proxy that fronts Redis and Postgres.
// Incoming TLS connections are routed to one backend or the other based on
// the SNI name presented by the client.
#[derive(Debug, Parser)]
pub struct ReverseProxyCommand {
    // Local address to listen on, e.g. "0.0.0.0:8443".
    #[clap(short, long)]
    pub listen: String,
    // SNI name that selects the Redis backend.
    #[clap(long)]
    pub redis_sni: String,
    // Address Redis traffic is forwarded to.
    #[clap(long)]
    pub redis_target: String,
    // SNI name that selects the Postgres backend.
    #[clap(long)]
    pub postgres_sni: String,
    // Address Postgres traffic is forwarded to.
    #[clap(long)]
    pub postgres_target: String,
    #[clap(long, help = "Certificate")]
    pub cert: String,
    #[clap(long, help = "Private key")]
    pub key: String,
    #[clap(long, help = "CA to trust")]
    pub ca: String,
    // Local paths, or (with the "remote-crl" feature) http(s) URLs.
    #[clap(long, help = "CRLs to use")]
    pub crl: Vec<String>,
}
// CLI options for the TLS forward proxy (client side): listens for plaintext
// connections locally and tunnels them to `target` over mutual TLS,
// presenting `sni` as the server name.
#[derive(Debug, Parser)]
pub struct ForwardProxyCommand {
    // Local plaintext listen address.
    #[clap(short, long)]
    pub listen: String,
    // Server name to present; must match the remote certificate.
    #[clap(short, long)]
    pub sni: String,
    // Remote TLS endpoint to tunnel to.
    #[clap(short, long)]
    pub target: String,
    // Client certificate presented to the server.
    #[clap(long)]
    pub cert: String,
    // Private key for the client certificate.
    #[clap(long)]
    pub key: String,
    // CA used to verify the server.
    #[clap(long)]
    pub ca: String,
    // CRL paths, or (with the "remote-crl" feature) http(s) URLs.
    #[clap(long)]
    pub crl: Vec<String>,
}
/// Wrap `writer` in a zstd compression layer: bytes written to the returned
/// sink arrive on `writer` compressed.
fn compressor_to(writer: impl AsyncWrite + Unpin) -> impl AsyncWrite + Unpin {
    async_compression::tokio::write::ZstdEncoder::new(writer)
}
/// Wrap `reader` in a zstd decompression layer: reads from the returned
/// source yield the decompressed byte stream.
fn decompressor_from(reader: impl AsyncBufRead + Unpin) -> impl AsyncRead + Unpin {
    async_compression::tokio::bufread::ZstdDecoder::new(reader)
}
/// Send a short diagnostic string to the peer through the same zstd
/// compression layer the proxy uses for payload traffic.
///
/// Bug fixed: the original piped the string through `tokio::io::copy` and
/// then dropped the encoder without flushing it. The zstd encoder buffers
/// input, so the final (often the only) compressed block could be lost and
/// the peer would never see the message. We now flush the encoder so the
/// buffered data reaches `w` before the encoder is dropped. `flush` is used
/// rather than `shutdown` because shutting the encoder down would also shut
/// down the borrowed underlying stream, which every caller closes itself
/// immediately afterwards.
async fn send_static_string(w: &mut (impl AsyncWrite + Unpin), s: &str) -> tokio::io::Result<()> {
    let mut encoder = compressor_to(w);
    encoder.write_all(s.as_bytes()).await?;
    encoder.flush().await?;
    Ok(())
}
/// Shuttle bytes both ways between `local` (plaintext) and `remote`
/// (zstd-compressed): data read from `local` is compressed onto `remote`,
/// and data read from `remote` is decompressed onto `local`.
///
/// Returns `(local→remote, remote→local)` byte counts as reported by
/// `tokio::io::copy`, i.e. uncompressed counts on each direction's read
/// side. Each direction shuts down its write half when its copy ends so the
/// peer observes EOF.
async fn copy_bidirectional_compressed(
    local: impl AsyncRead + AsyncWrite + Unpin,
    remote: impl AsyncRead + AsyncWrite + Unpin,
) -> tokio::io::Result<(u64, u64)> {
    let (mut local_rx, mut local_tx) = tokio::io::split(local);
    let (remote_rx, remote_tx) = tokio::io::split(remote);
    // The zstd decoder requires a buffered reader; the encoder writes direct.
    let remote_rx_buf = BufReader::new(remote_rx);
    let mut remote_tx_comp = compressor_to(remote_tx);
    let mut remote_rx_decomp = decompressor_from(remote_rx_buf);
    log::info!("Starting transfer");
    let uplink = async move {
        let res = tokio::io::copy(&mut local_rx, &mut remote_tx_comp).await;
        // Always attempt the shutdown (it finalizes the zstd frame and closes
        // the write half); surface the copy error first if both failed.
        let shutdown = remote_tx_comp.shutdown().await;
        let res = res?;
        shutdown?;
        tokio::io::Result::Ok(res)
    };
    let downlink = async move {
        let res = tokio::io::copy(&mut remote_rx_decomp, &mut local_tx).await;
        let shutdown = local_tx.shutdown().await;
        let res = res?;
        shutdown?;
        tokio::io::Result::Ok(res)
    };
    // NOTE(review): try_join! cancels the surviving direction as soon as the
    // other errors, which also tears down half-closed but active sessions —
    // confirm this is the intended failure mode.
    let res = tokio::try_join!(uplink, downlink)?;
    log::info!(
        "Finished transferring {} bytes from local to remote and {} bytes from remote to local (compressed)",
        res.0,
        res.1
    );
    Ok(res)
}
/// Run the master-side TLS reverse proxy.
///
/// Accepts TLS connections that must present a client certificate chaining
/// to the configured CA (checked against the configured CRLs), then routes
/// each connection by the SNI the client sent: `redis_sni` → `redis_target`,
/// `postgres_sni` → `postgres_target`. Payload between the TLS peer and the
/// local backend is relayed via `copy_bidirectional_compressed`.
/// Runs forever unless binding fails or a backend address cannot resolve.
pub async fn reverse_proxy(opts: ReverseProxyCommand) -> Result<(), Box<dyn std::error::Error>> {
    // The CA is parsed twice on purpose: the x509-parser view verifies
    // downloaded CRL signatures, the rustls view anchors client verification.
    let (_, ca_pem) = x509_parser::pem::parse_x509_pem(&std::fs::read(&opts.ca)?)?;
    let (_, ca_cert) = x509_parser::parse_x509_certificate(&ca_pem.contents)?;
    let mut cert_store = RootCertStore::empty();
    cert_store.add(CertificateDer::from_pem_file(&opts.ca)?)?;
    let cert_store = Arc::new(cert_store);
    let mut crls = Vec::new();
    for crl_def in &opts.crl {
        #[cfg(feature = "remote-crl")]
        {
            // crls are signed so we can trust them
            if crl_def.starts_with("http://") || crl_def.starts_with("https://") {
                log::info!("Downloading CRL: {}", crl_def);
                let crl = reqwest::get(crl_def).await?.bytes().await?;
                let (_, parsed) = x509_parser::parse_x509_crl(&crl)?;
                if let Err(e) = parsed.verify_signature(ca_cert.public_key()) {
                    log::error!("Failed to verify CRL signature: {}", e);
                    // http-only CRLs rely solely on the signature, so a bad
                    // signature is fatal; over https the CRL is just skipped.
                    // NOTE(review): silently skipping an https CRL weakens
                    // revocation checking — confirm this is intended.
                    if !crl_def.starts_with("https://") {
                        return Err(e.into());
                    }
                    continue;
                }
                crls.push(crl.to_vec().into());
                continue;
            }
        }
        // Not a URL (or remote-crl disabled): treated as a local file read
        // verbatim. NOTE(review): the bytes are used as-is, so the file is
        // presumably DER — verify, a PEM CRL would fail in the builder below.
        crls.push(std::fs::read(crl_def).expect("Failed to read CRL").into());
    }
    // Mandatory mutual TLS: clients are verified against the CA plus CRLs.
    let cv = WebPkiClientVerifier::builder(cert_store.clone())
        .with_crls(crls)
        .build()
        .expect("Failed to build client verifier");
    let config = ServerConfig::builder()
        .with_client_cert_verifier(cv)
        .with_single_cert(
            vec![CertificateDer::from_pem_file(&opts.cert)?],
            PrivateKeyDer::from_pem_file(&opts.key)?,
        )?;
    let acceptor = TlsAcceptor::from(Arc::new(config));
    let listener = tokio::net::TcpListener::bind(&opts.listen).await?;
    log::info!("Listening on: {}", opts.listen);
    // Shared copies handed to the per-connection tasks spawned below.
    let (redis_sni, postgres_sni, redis_target, postgres_target) = (
        Arc::new(opts.redis_sni.clone()),
        Arc::new(opts.postgres_sni.clone()),
        opts.redis_target.clone(),
        opts.postgres_target.clone(),
    );
    // Fail fast on unresolvable backend addresses before accepting traffic.
    if let Err(e) = opts.redis_target.to_socket_addrs() {
        eprintln!("Failed to resolve redis target: {}", e);
        return Ok(());
    }
    if let Err(e) = opts.postgres_target.to_socket_addrs() {
        eprintln!("Failed to resolve postgres target: {}", e);
        return Ok(());
    }
    // Accept loop: one spawned task per inbound connection.
    loop {
        let (pt_stream, _) = match listener.accept().await {
            Ok(s) => s,
            Err(e) => {
                eprintln!("Failed to accept connection: {}", e);
                continue;
            }
        };
        let acceptor = acceptor.clone();
        let (redis_sni, postgres_sni, redis_target, postgres_target) = (
            redis_sni.clone(),
            postgres_sni.clone(),
            redis_target.clone(),
            postgres_target.clone(),
        );
        tokio::spawn(async move {
            match acceptor.accept(pt_stream).await {
                // Route on the SNI captured during the TLS handshake.
                Ok(mut tls) => match tls.get_ref().1.server_name().map(|s| s.to_string()) {
                    Some(sni) if sni == *redis_sni => {
                        log::info!(
                            "Accepted Redis connection for {:?}",
                            tls.get_ref().1.server_name()
                        );
                        match tokio::net::TcpStream::connect(&redis_target).await {
                            Ok(redis) => {
                                if let Err(e) = copy_bidirectional_compressed(redis, tls).await {
                                    eprintln!("Failed to copy data: {}", e);
                                }
                            }
                            Err(e) => {
                                // Backend unreachable: close the client side.
                                eprintln!("Failed to connect to redis: {}", e);
                                tls.shutdown().await.expect("Failed to shutdown TLS stream");
                            }
                        }
                    }
                    Some(sni) if sni == *postgres_sni => {
                        log::info!(
                            "Accepted Postgres connection for {:?}",
                            tls.get_ref().1.server_name()
                        );
                        match tokio::net::TcpStream::connect(&postgres_target).await {
                            Ok(postgres) => {
                                if let Err(e) = copy_bidirectional_compressed(postgres, tls).await {
                                    eprintln!("Failed to copy data: {}", e);
                                }
                            }
                            Err(e) => {
                                eprintln!("Failed to connect to postgres: {}", e);
                                tls.shutdown().await.expect("Failed to shutdown TLS stream");
                            }
                        }
                    }
                    // Unknown SNI: tell the peer why, then close.
                    Some(sni) => {
                        log::warn!("Accepted connection for {:?}, but SNI {} does not match any configured SNI", tls.get_ref().1.server_name(), sni);
                        send_static_string(
                            &mut tls,
                            format!("SNI {} does not match any configured SNI", sni).as_str(),
                        )
                        .await
                        .expect("Failed to send static string");
                        tls.shutdown().await.expect("Failed to shutdown TLS stream");
                    }
                    // No SNI at all: nothing to route on.
                    _ => {
                        send_static_string(&mut tls, "No SNI provided")
                            .await
                            .expect("Failed to send static string");
                        eprintln!("No SNI provided");
                        tls.shutdown().await.expect("Failed to shutdown TLS stream");
                    }
                },
                Err(e) => {
                    eprintln!("Failed to accept connection: {}", e);
                }
            }
        });
    }
}
/// Run the slave-side TLS forward proxy: accept plaintext connections
/// locally and tunnel each one over TLS (presenting our client certificate)
/// to `opts.target`, verifying the server against the CA plus CRLs and
/// using `opts.sni` as the server name. Runs forever unless binding fails.
pub async fn forward_proxy(opts: ForwardProxyCommand) -> Result<(), Box<dyn std::error::Error>> {
    // CA parsed twice: x509-parser view for CRL signature checks, rustls
    // view as the verification anchor.
    let (_, ca_pem) = x509_parser::pem::parse_x509_pem(&std::fs::read(&opts.ca)?)?;
    let (_, ca_cert) = x509_parser::parse_x509_certificate(&ca_pem.contents)?;
    let mut cert_store = RootCertStore::empty();
    cert_store.add(CertificateDer::from_pem_file(&opts.ca)?)?;
    let cert_store = Arc::new(cert_store);
    let mut crls = Vec::new();
    for crl_def in &opts.crl {
        #[cfg(feature = "remote-crl")]
        {
            // crls are signed so we can trust them
            if crl_def.starts_with("http://") || crl_def.starts_with("https://") {
                log::info!("Downloading CRL: {}", crl_def);
                let crl = reqwest::get(crl_def).await?.bytes().await?;
                let (_, parsed) = x509_parser::parse_x509_crl(&crl)?;
                if let Err(e) = parsed.verify_signature(ca_cert.public_key()) {
                    log::error!("Failed to verify CRL signature: {}", e);
                    // Fatal for plain http; skipped (NOTE(review): silently)
                    // for https — same policy as reverse_proxy.
                    if !crl_def.starts_with("https://") {
                        return Err(e.into());
                    }
                    continue;
                }
                crls.push(crl.to_vec().into());
                continue;
            }
        }
        // Local file path, read verbatim (presumably DER — verify).
        crls.push(std::fs::read(crl_def).expect("Failed to read CRL").into());
    }
    // Verify the remote server against the CA and CRLs.
    let cv = WebPkiServerVerifier::builder(cert_store.clone())
        .with_crls(crls)
        .build()
        .expect("Failed to build server verifier");
    let config = ClientConfig::builder()
        .with_webpki_verifier(cv)
        .with_client_auth_cert(
            vec![CertificateDer::from_pem_file(&opts.cert)?],
            PrivateKeyDer::from_pem_file(&opts.key)?,
        )?;
    let connector = TlsConnector::from(Arc::new(config));
    let listener = tokio::net::TcpListener::bind(&opts.listen).await?;
    log::info!("Listening on: {}", opts.listen);
    let sni = ServerName::try_from(opts.sni.as_str()).expect("Failed to parse SNI");
    // Accept loop: dial the remote per local connection, then hand the TLS
    // handshake plus relay off to a task.
    loop {
        let (pt_stream, _) = match listener.accept().await {
            Ok(s) => s,
            Err(e) => {
                eprintln!("Failed to accept connection: {}", e);
                continue;
            }
        };
        let connector = connector.clone();
        // TCP connect happens before spawning, so dial failures are reported
        // in accept order and the task is only spawned for live sockets.
        let tls_stream = match TcpStream::connect(&opts.target).await {
            Ok(s) => s,
            Err(e) => {
                eprintln!("Failed to connect to target: {}", e);
                continue;
            }
        };
        let sni = sni.to_owned();
        tokio::spawn(async move {
            match connector.connect(sni, tls_stream).await {
                Ok(tls) => {
                    if let Err(e) = copy_bidirectional_compressed(pt_stream, tls).await {
                        eprintln!("Failed to copy data: {}", e);
                    }
                }
                Err(e) => {
                    eprintln!("Failed to connect to target: {}", e);
                }
            }
        });
    }
}

344
src/ops/postgres.rs Normal file
View file

@ -0,0 +1,344 @@
use clap::Parser;
use sqlx::{Connection, PgConnection};
const DEFAULT_URL_ENV: &str = "DATABASE_URL";
// CLI entry for configuring the master (publisher) side of replication.
#[derive(Debug, Parser)]
pub struct SetupPostgresMasterCommand {
    #[clap(long, help = "Postgres Connection String, defaults to DATABASE_URL")]
    pub connection_string: Option<String>,
    #[clap(subcommand)]
    pub subcmd: SetupPostgresMasterSubCommand,
}
// Publisher-side operations: create/drop a publication, add/remove tables.
#[derive(Debug, Parser)]
pub enum SetupPostgresMasterSubCommand {
    #[clap(name = "setup")]
    Setup(SetupPublicationCommand),
    #[clap(name = "teardown")]
    Drop(DropPublicationCommand),
    #[clap(name = "add-table")]
    AddTable(AddTableCommand),
    #[clap(name = "drop-table")]
    DropTable(DropTableCommand),
}
// Options for `CREATE PUBLICATION` on the master.
#[derive(Debug, Parser)]
pub struct SetupPublicationCommand {
    #[clap(long, help = "Publication Name")]
    pub publication: String,
    // When set, the publication starts empty instead of covering the whole
    // public schema.
    #[clap(long, help = "Whitelist mode, each table must be added manually")]
    pub whitelist: bool,
    #[clap(long, help = "Publish Delete", default_value = "true")]
    pub publish_delete: bool,
    #[clap(long, help = "Publish Truncate")]
    pub publish_truncate: bool,
    // By default an existing publication makes setup a no-op.
    #[clap(long, help = "Fail if publication already exists")]
    pub must_not_exist: bool,
}
// Options for `DROP PUBLICATION` on the master.
#[derive(Debug, Parser)]
pub struct DropPublicationCommand {
    #[clap(long, help = "Publication Name")]
    pub publication: String,
}
// Options for removing one or more tables from a publication.
#[derive(Debug, Parser)]
pub struct DropTableCommand {
    #[clap(long, help = "Publication Name")]
    pub publication: String,
    // May be passed multiple times.
    #[clap(short, long, help = "Table Name")]
    pub table: Vec<String>,
}
// Options for adding one or more tables to a publication.
#[derive(Debug, Parser)]
pub struct AddTableCommand {
    #[clap(long, help = "Publication Name")]
    pub publication: String,
    // May be passed multiple times.
    #[clap(short, long, help = "Table Name")]
    pub table: Vec<String>,
}
// CLI entry for configuring the slave (subscriber) side of replication.
#[derive(Debug, Parser)]
pub struct SetupPostgresSlaveCommand {
    #[clap(long, help = "Postgres Connection String, defaults to DATABASE_URL")]
    pub connection_string: Option<String>,
    #[clap(subcommand)]
    pub subcmd: SetupPostgresSlaveSubCommand,
}
// Subscriber-side operations: create or drop a subscription.
#[derive(Debug, Parser)]
pub enum SetupPostgresSlaveSubCommand {
    #[clap(name = "setup")]
    Setup(SetupSubscriptionCommand),
    #[clap(name = "teardown")]
    Drop(DropSubscriptionCommand),
}
// Options for `CREATE SUBSCRIPTION` on the replica.
#[derive(Debug, Parser)]
pub struct SetupSubscriptionCommand {
    // Name of the publication on the master to subscribe to.
    #[clap(long, help = "Publication Name")]
    pub publication: String,
    #[clap(long, help = "Subscription Name")]
    pub subscription: String,
    // Enables `WITH (two_phase = on)` on the subscription.
    #[clap(long, help = "Two phase transaction")]
    pub two_phase: bool,
}
// Options for `DROP SUBSCRIPTION` on the replica.
#[derive(Debug, Parser)]
pub struct DropSubscriptionCommand {
    #[clap(long, help = "Subscription Name")]
    pub subscription: String,
}
/// Read the fallback Postgres connection string from the `DATABASE_URL`
/// environment variable; `None` when unset (or not valid UTF-8).
pub fn postgres_connection_string_from_env() -> Option<String> {
    match std::env::var("DATABASE_URL") {
        Ok(url) => Some(url),
        Err(_) => None,
    }
}
/// Validate that `name` is safe to splice into SQL DDL as an identifier:
/// non-empty, starting with a lowercase ASCII letter or underscore, then
/// only lowercase ASCII letters, digits, and underscores. Returns the name
/// unchanged on success, `None` otherwise.
///
/// Bug fixed: the original predicate was a single `all()` over every byte,
/// so the empty string was accepted (`all()` on an empty iterator is true),
/// which would produce invalid SQL such as `CREATE PUBLICATION `. The
/// leading-character rule is now explicit instead of relying on `&&`/`||`
/// precedence.
fn safe_ident(name: &str) -> Option<&str> {
    let mut bytes = name.bytes();
    // `?` rejects the empty string outright.
    let first = bytes.next()?;
    if (first.is_ascii_lowercase() || first == b'_')
        && bytes.all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == b'_')
    {
        Some(name)
    } else {
        None
    }
}
/// Errors produced by the publication/subscription setup helpers.
#[derive(Debug, thiserror::Error)]
pub enum PostgresSetupError {
    // No connection-string argument and no DATABASE_URL in the environment.
    #[error("Missing Connection String, set via DATABASE_URL")]
    MissingConnection,
    // The publication/subscription already exists and the caller required
    // that it must not.
    #[error("Entity already exists: {0}")]
    AlreadyExists(String),
    // An identifier failed the safe_ident check and cannot be spliced into DDL.
    #[error("Invalid Identifier: {0}")]
    InvalidIdentifier(String),
    #[error("Postgres Error: {0}")]
    PostgresError(#[from] sqlx::Error),
}
/// Create a logical-replication publication on the master database.
///
/// The connection string is taken from the argument or `DATABASE_URL`. If
/// the publication already exists this is a no-op, unless
/// `opts.must_not_exist` is set, in which case it is an error.
/// (Cleanup: the connection string is resolved once into a local instead of
/// the original's `&&`-double-reference inline expression.)
pub async fn setup_postgres_pub(
    connection_string: Option<&str>,
    opts: SetupPublicationCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    // Idempotency probe: does the publication already exist?
    let existing_pub =
        sqlx::query_scalar::<_, i32>("SELECT 1 FROM pg_publication WHERE pubname = $1")
            .bind(&opts.publication)
            .fetch_optional(&mut postgres)
            .await?;
    if existing_pub.is_some() {
        return if opts.must_not_exist {
            Err(PostgresSetupError::AlreadyExists(
                "Publication already exists".to_string(),
            ))
        } else {
            Ok(())
        };
    }
    // DDL cannot take bind parameters, so identifiers are validated with
    // safe_ident before being interpolated into the statement.
    let query = format!(
        "CREATE PUBLICATION {}{}{}",
        safe_ident(&opts.publication)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.publication.clone()))?,
        // Whitelist mode starts empty; otherwise publish the whole schema.
        if opts.whitelist {
            ""
        } else {
            " FOR TABLES IN SCHEMA public"
        },
        match (opts.publish_delete, opts.publish_truncate) {
            (true, true) => " WITH (publish = 'insert, update, delete, truncate')",
            (true, false) => " WITH (publish = 'insert, update, delete')",
            (false, true) => {
                log::warn!("Publishing truncate without delete does not make sense!");
                " WITH (publish = 'insert, update, truncate')"
            }
            (false, false) => " WITH (publish = 'insert, update')",
        }
    );
    log::info!("Executing: {}", query);
    sqlx::query(&query).execute(&mut postgres).await?;
    Ok(())
}
/// Drop a publication on the master database.
///
/// Bug fixed: the original called `safe_ident(...).unwrap()`, panicking on
/// an invalid identifier; it now returns `InvalidIdentifier`, matching
/// `setup_postgres_pub`'s error handling.
pub async fn drop_postgres_pub(
    connection_string: Option<&str>,
    opts: DropPublicationCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    let query = format!(
        "DROP PUBLICATION {}",
        safe_ident(&opts.publication)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.publication.clone()))?
    );
    log::info!("Executing: {}", query);
    sqlx::query(&query).execute(&mut postgres).await?;
    Ok(())
}
/// Add each listed table to a publication, one ALTER statement per table.
///
/// Bug fixed: invalid identifiers now return `InvalidIdentifier` instead of
/// panicking via `unwrap()`. The publication name is validated once, before
/// any statement runs, so a bad name no longer fails midway.
pub async fn add_table_to_postgres_pub(
    connection_string: Option<&str>,
    opts: AddTableCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    let publication = safe_ident(&opts.publication)
        .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.publication.clone()))?;
    for table in &opts.table {
        let table = safe_ident(table)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(table.clone()))?;
        let query = format!("ALTER PUBLICATION {} ADD TABLE {}", publication, table);
        log::info!("Executing: {}", query);
        sqlx::query(&query).execute(&mut postgres).await?;
    }
    Ok(())
}
/// Remove each listed table from a publication, one ALTER statement per
/// table.
///
/// Bug fixed: invalid identifiers now return `InvalidIdentifier` instead of
/// panicking via `unwrap()`, consistent with the setup functions.
pub async fn drop_table_from_postgres_pub(
    connection_string: Option<&str>,
    opts: DropTableCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    let publication = safe_ident(&opts.publication)
        .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.publication.clone()))?;
    for table in &opts.table {
        let table = safe_ident(table)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(table.clone()))?;
        let query = format!("ALTER PUBLICATION {} DROP TABLE {}", publication, table);
        log::info!("Executing: {}", query);
        sqlx::query(&query).execute(&mut postgres).await?;
    }
    Ok(())
}
/// Create a logical-replication subscription on the replica.
///
/// Bugs fixed:
/// * `CREATE SUBSCRIPTION ... CONNECTION $1` — Postgres DDL/utility
///   statements cannot carry bind parameters, so the original statement
///   could never execute. The connection string is now embedded as a quoted
///   SQL string literal with single quotes doubled.
/// * When no connection string was supplied, the original bound the literal
///   text "DATABASE_URL" (the env var *name*) rather than the resolved
///   value; the resolved string is used now.
/// * Invalid publication names return `InvalidIdentifier` instead of
///   panicking via `unwrap()`.
///
/// NOTE(review): the CONNECTION clause points at the same database we
/// connect to; ordinarily it should reference the *publisher*. Presumably
/// the TLS proxy makes those the same endpoint — confirm. Note also that
/// the full connection string (which may include a password) is logged.
pub async fn setup_postgres_sub(
    connection_string: Option<&str>,
    opts: SetupSubscriptionCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    // Existing subscriptions are always an error (no idempotent mode here).
    let existing_sub =
        sqlx::query_scalar::<_, i32>("SELECT 1 FROM pg_subscription WHERE subname = $1")
            .bind(&opts.subscription)
            .fetch_optional(&mut postgres)
            .await?;
    if existing_sub.is_some() {
        return Err(PostgresSetupError::AlreadyExists(
            "Subscription already exists".to_string(),
        ));
    }
    let query = format!(
        "CREATE SUBSCRIPTION {} CONNECTION '{}' PUBLICATION {}{}",
        safe_ident(&opts.subscription)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.subscription.clone()))?,
        // Escape embedded single quotes for the SQL string literal.
        conn_str.replace('\'', "''"),
        safe_ident(&opts.publication)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.publication.clone()))?,
        if opts.two_phase {
            " WITH (two_phase = on)"
        } else {
            ""
        }
    );
    log::info!("Executing: {}", query);
    sqlx::query(&query).execute(&mut postgres).await?;
    Ok(())
}
/// Drop a subscription on the replica database.
///
/// Bug fixed: invalid identifiers now return `InvalidIdentifier` instead of
/// panicking via `unwrap()`, consistent with the setup functions.
pub async fn drop_postgres_sub(
    connection_string: Option<&str>,
    opts: DropSubscriptionCommand,
) -> Result<(), PostgresSetupError> {
    let conn_str = connection_string
        .map(|s| s.to_string())
        .or_else(postgres_connection_string_from_env)
        .ok_or(PostgresSetupError::MissingConnection)?;
    let mut postgres = PgConnection::connect(&conn_str).await?;
    let query = format!(
        "DROP SUBSCRIPTION {}",
        safe_ident(&opts.subscription)
            .ok_or_else(|| PostgresSetupError::InvalidIdentifier(opts.subscription.clone()))?
    );
    log::info!("Executing: {}", query);
    sqlx::query(&query).execute(&mut postgres).await?;
    Ok(())
}

135
src/ops/service.rs Normal file
View file

@ -0,0 +1,135 @@
use std::path::Path;
use clap::Parser;
use serde::Deserialize;
use super::network::ReverseProxyCommand;
// Default config path and the fixed certificate-material filenames the
// service expects to find in its (possibly config-selected) working
// directory.
const DEF_CONFIG_FILE: &str = "/etc/replikey.toml";
const CA_CERT: &str = "ca.pem";
const SERVER_CERT: &str = "server.pem";
const SERVER_KEY: &str = "server.key";
const CLIENT_CERT: &str = "client.pem";
const CLIENT_KEY: &str = "client.key";
// Top-level `service` CLI command; dispatches to a replication role.
#[derive(Debug, Parser)]
pub struct ServiceCommand {
    #[clap(subcommand)]
    pub subcmd: ServiceSubCommand,
}
// Which replication role to run, each driven by a TOML config file.
#[derive(Debug, Parser)]
pub enum ServiceSubCommand {
    #[clap(name = "replicate-master")]
    ReplicateMaster {
        #[clap(short, long, default_value = DEF_CONFIG_FILE)]
        config: String,
    },
    #[clap(name = "replicate-slave")]
    ReplicateSlave {
        #[clap(short, long, default_value = DEF_CONFIG_FILE)]
        config: String,
    },
}
/// Root of the service TOML config file.
#[derive(Debug, Deserialize)]
pub struct Config {
    connection: ConnectionConfig,
}
/// `[connection]` table: at most one role section is consulted, matching
/// the subcommand the service was started with.
#[derive(Debug, Deserialize)]
pub struct ConnectionConfig {
    master: Option<MasterConfig>,
    slave: Option<SlaveConfig>,
}
/// `[connection.master]`: reverse-proxy settings for the master role.
#[derive(Debug, Deserialize)]
pub struct MasterConfig {
    // TLS listen address.
    listen: String,
    // SNI → backend routing for Redis and Postgres.
    redis_sni: String,
    redis_target: String,
    postgres_sni: String,
    postgres_target: String,
    // Optional chdir target; cert/key files are resolved relative to it.
    workdir: Option<String>,
    // CRL paths or URLs passed through to the proxy.
    crl: Vec<String>,
}
/// `[connection.slave]`: settings for the slave role.
// NOTE(review): unlike MasterConfig there are no *_target fields; the
// service currently reuses the SNI strings as connection targets — see the
// note on service_replicate_slave and confirm the intended schema.
#[derive(Debug, Deserialize)]
pub struct SlaveConfig {
    listen: String,
    redis_sni: String,
    postgres_sni: String,
    workdir: Option<String>,
    crl: Vec<String>,
}
/// Start the replication-master service: load the TOML config, optionally
/// chdir into the configured workdir, and run the TLS reverse proxy using
/// the fixed server certificate filenames in the working directory. Blocks
/// until the proxy returns; panics on config or runtime errors.
///
/// Fixes: reuses the already-fetched `master_conf` for `crl` instead of
/// re-unwrapping `config.connection.master`, and prints the startup message
/// *before* blocking on the proxy loop — the original printed "started"
/// only after the proxy had already exited.
pub fn service_replicate_master(config: String) {
    let config = std::fs::read_to_string(config).unwrap();
    let config: Config = toml::from_str(&config).expect("Failed to parse config");
    // chdir first so the fixed cert/key filenames resolve in the workdir.
    if let Some(wd) = config
        .connection
        .master
        .as_ref()
        .and_then(|m| m.workdir.as_ref())
    {
        std::env::set_current_dir(wd).expect("Failed to change directory");
    }
    let master_conf = config
        .connection
        .master
        .as_ref()
        .expect("missing [connection.master] section in config");
    let cmd = ReverseProxyCommand {
        listen: master_conf.listen.clone(),
        redis_sni: master_conf.redis_sni.clone(),
        redis_target: master_conf.redis_target.clone(),
        postgres_sni: master_conf.postgres_sni.clone(),
        postgres_target: master_conf.postgres_target.clone(),
        cert: Path::new(SERVER_CERT).to_string_lossy().to_string(),
        key: Path::new(SERVER_KEY).to_string_lossy().to_string(),
        ca: Path::new(CA_CERT).to_string_lossy().to_string(),
        crl: master_conf.crl.clone(),
    };
    println!("Replication master started");
    tokio::runtime::Runtime::new()
        .unwrap()
        .block_on(crate::ops::network::reverse_proxy(cmd))
        .unwrap();
}
// Start the replication-slave service from the TOML config.
//
// NOTE(review): this runs `reverse_proxy` with `redis_target` and
// `postgres_target` set to the *SNI* strings (SlaveConfig has no target
// fields), and places the client certificate/key into the server-cert
// slots of ReverseProxyCommand. That only works if the SNI names are also
// resolvable addresses; presumably a forward proxy (ForwardProxyCommand /
// forward_proxy) was intended for the slave role — confirm before relying
// on this path.
pub fn service_replicate_slave(config: String) {
    let config = std::fs::read_to_string(config).unwrap();
    let config: Config = toml::from_str(&config).expect("Failed to parse config");
    // chdir first so the fixed cert/key filenames resolve in the workdir.
    if let Some(wd) = config
        .connection
        .slave
        .as_ref()
        .and_then(|m| m.workdir.as_ref())
    {
        std::env::set_current_dir(wd).expect("Failed to change directory");
    }
    let slave_conf = config.connection.slave.as_ref().unwrap();
    let cmd = ReverseProxyCommand {
        listen: slave_conf.listen.clone(),
        redis_sni: slave_conf.redis_sni.clone(),
        // SNI reused as the connection target — see NOTE(review) above.
        redis_target: slave_conf.redis_sni.clone(),
        postgres_sni: slave_conf.postgres_sni.clone(),
        postgres_target: slave_conf.postgres_sni.clone(),
        cert: Path::new(CLIENT_CERT).to_string_lossy().to_string(),
        key: Path::new(CLIENT_KEY).to_string_lossy().to_string(),
        ca: Path::new(CA_CERT).to_string_lossy().to_string(),
        crl: config.connection.slave.as_ref().unwrap().crl.clone(),
    };
    // Blocks on the proxy loop; this message is only reached after it exits.
    tokio::runtime::Runtime::new()
        .unwrap()
        .block_on(crate::ops::network::reverse_proxy(cmd))
        .unwrap();
    println!("Replication slave started");
}