1
0
Fork 0

bgpwtf/cccampix: draw the rest of the fucking owl

Change-Id: I49fd5906e69512e8f2d414f406edc0179522f225
master
q3k 2019-08-01 20:16:27 +02:00 committed by Serge Bazanski
parent ddfd6591f8
commit 1fad2e5c6e
53 changed files with 3008 additions and 41 deletions

View File

@ -12,6 +12,14 @@ http_archive(
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
)
# subpar
git_repository(
name = "subpar",
remote = "https://github.com/q3k/subpar",
commit = "5dd9fb4586616c69df9b3f5aba12f08f85d708d1",
)
# Docker rules
http_archive(
@ -80,6 +88,22 @@ pip_repository(
requirements = "//pip:requirements-linux.txt",
)
# stackb/rules_proto (for Python proto compilation)
http_archive(
name = "build_stack_rules_proto",
urls = ["https://github.com/stackb/rules_proto/archive/b93b544f851fdcd3fc5c3d47aee3b7ca158a8841.tar.gz"],
sha256 = "c62f0b442e82a6152fcd5b1c0b7c4028233a9e314078952b6b04253421d56d61",
strip_prefix = "rules_proto-b93b544f851fdcd3fc5c3d47aee3b7ca158a8841",
)
load("@build_stack_rules_proto//python:deps.bzl", "python_grpc_compile")
python_grpc_compile()
load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
grpc_deps()
# Docker base images
load("@io_bazel_rules_docker//container:container.bzl", "container_pull")
@ -88,8 +112,8 @@ container_pull(
name = "prodimage-bionic",
registry = "registry.k0.hswaw.net",
repository = "q3k/prodimage",
tag = "20190725-1806",
digest = "sha256:b3ad5f62813e2d88ce8cc4f2c9c8e260a3d0e517eda428275ee7976bdd8d6475",
tag = "20190810-1802",
digest = "sha256:ed0cbffacdb4bd62c193c753d2b6fb7e02115ce8571b0186ef3e1a12c031be38",
)
container_pull(
@ -198,7 +222,6 @@ load(
)
go_image_repositories()
# Go repositories
go_repository(
@ -661,3 +684,9 @@ go_repository(
commit = "cf67d735e69b4a4d50cdf571a92b0144786080f7",
importpath = "github.com/go-test/deep",
)
go_repository(
name = "com_github_sethvargo_go_password",
commit = "68ac5879751a7105834296859f8c1bf70b064675",
importpath = "github.com/sethvargo/go-password",
)

View File

@ -1,3 +1,5 @@
load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
py_binary(
name = "sync",
srcs = [
@ -7,3 +9,33 @@ py_binary(
"@pip36//requests",
],
)
# Layer containing all CCCampIX service binaries, installed under /ix/.
container_layer(
    name = "layer_bin",
    files = [
        "//bgpwtf/cccampix/irr:irr",
        "//bgpwtf/cccampix/peeringdb:peeringdb",
        "//bgpwtf/cccampix/verifier:verifier",
        "//bgpwtf/cccampix/frontend:frontend.par",
    ],
    directory = "/ix/",
)

# Combined runtime image: service binaries plus the octorpki binary and
# its trust anchor locators (pulled in as layers from the octorpki package).
container_image(
    name = "runtime",
    base = "@prodimage-bionic//image",
    layers = [
        ":layer_bin",
        "//bgpwtf/cccampix/octorpki:layer_bin",
        "//bgpwtf/cccampix/octorpki:layer_tals",
    ],
)

# Push the runtime image to the hswaw registry, tagged with the build
# timestamp and git commit.
container_push(
    name = "push",
    image = ":runtime",
    format = "Docker",
    registry = "registry.k0.hswaw.net",
    repository = "bgpwtf/cccampix",
    tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
)

View File

@ -0,0 +1,48 @@
load("@subpar//:subpar.bzl", "par_binary")
# Frontend library: Flask application, gRPC channel helper and the HTML
# templates they render.
py_library(
    name = "frontend_lib",
    srcs = [
        "frontend.py",
        "channel.py",
    ],
    data = glob([
        "templates/**",
    ]),
    deps = [
        "@pip36//arrow",
        "@pip36//flask",
        "@pip36//grpcio",
        "//bgpwtf/cccampix/proto:ix_py_proto",
    ],
)

# Development entry point: runs the Flask debug server (dev.py).
py_binary(
    name = "frontend_dev",
    srcs = [
        "dev.py",
    ],
    main = "dev.py",
    deps = [
        ":frontend_lib",
    ],
)

# Production entry point: a self-contained .par bundle running gunicorn
# (server.py). The subpar options keep package data (templates) reachable
# at runtime.
par_binary(
    name = "frontend",
    main = "server.py",
    srcs = [
        "server.py",
    ],
    deps = [
        ":frontend_lib",
        "@pip36//gevent",
        "@pip36//gunicorn",
    ],
    visibility = [
        "//bgpwtf/cccampix:__pkg__",
    ],
    legacy_create_init = False,
    zip_safe = False,
    no_remove = True,
)

View File

@ -0,0 +1,38 @@
import logging
from flask import current_app
from flask import _app_ctx_stack as stack
import grpc
logger = logging.getLogger(__name__)


class Channel:
    """A lazily-created, per-application-context gRPC channel.

    The underlying grpc channel is created on first access within a Flask
    application context and dropped when that context tears down. Stub
    objects are cached per stub class on the Channel instance.

    NOTE(review): stubs are cached for the lifetime of the Channel but are
    bound to the connection of the app context in which they were first
    created — confirm this is safe across requests after teardown.
    """

    def __init__(self, app, address):
        # app: Flask application; address: host:port of the gRPC server.
        self.app = app
        self.address = address
        self.stubs = {}
        # Drop the context-local connection when the app context ends.
        app.teardown_appcontext(self.teardown)

    def _connect(self):
        # Dial a new insecure channel to the configured address.
        logger.info("Connecting to {}...".format(self.address))
        return grpc.insecure_channel(self.address)

    @property
    def conn(self):
        # App-context-local channel, created on first access. Returns None
        # when called outside an application context.
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'conn'):
                ctx.conn = self._connect()
            return ctx.conn

    def stub(self, stub):
        # Return a cached stub instance for the given stub class, creating
        # it (bound to the current context's connection) on first use.
        if stub not in self.stubs:
            self.stubs[stub] = stub(self.conn)
        return self.stubs[stub]

    def teardown(self, exception):
        # App context teardown hook: drop the context's channel reference.
        ctx = stack.top
        if hasattr(ctx, 'conn'):
            del ctx.conn

View File

@ -0,0 +1,8 @@
from bgpwtf.cccampix.frontend.frontend import create_app
# Development configuration: talk to a locally-running verifier service.
config = {
    'verifier': '127.0.0.1:4220',
}

# Run the frontend under Flask's built-in debug server. This file is the
# entry point of the :frontend_dev target only; production uses server.py
# (gunicorn).
app = create_app(config)
app.run(debug=True)

View File

@ -0,0 +1,64 @@
import logging
import arrow
import grpc
from flask import Flask, render_template
from flask.logging import default_handler
from bgpwtf.cccampix.proto import ix_pb2 as ipb
from bgpwtf.cccampix.proto import ix_pb2_grpc as ipb_grpc
from bgpwtf.cccampix.frontend.channel import Channel
# Root logging setup for the frontend: timestamped messages at DEBUG level.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)

# Human-readable metadata for peer checks, keyed by the check name reported
# by the verifier service: name -> (display name, description).
# Fix: "entires" -> "entries" in the user-visible description.
check_info = {
    'irr': ('IRR', 'Required IRR entries are present for this AS'),
}
def create_app(config=None):
    """Build and return the frontend Flask application.

    Args:
        config: dict of settings; must contain 'verifier', the host:port
            address of the verifier gRPC service.

    NOTE(review): despite the None default, a config is effectively
    required — the config['verifier'] lookup below raises TypeError when
    config is None. Confirm whether the default should be removed.
    """
    app = Flask(__name__)
    app.config.update(config or {})
    # Module-level logging is already configured; drop Flask's own handler
    # to avoid duplicate log lines.
    app.logger.removeHandler(default_handler)

    # Per-app-context gRPC channel to the verifier service.
    verifier = Channel(app, config['verifier'])

    @app.route('/')
    def view_index():
        # Start the processor status call asynchronously (future), consume
        # the peer summary stream meanwhile, then collect the future.
        req = ipb.ProcessorStatusRequest()
        processors = verifier.stub(ipb_grpc.VerifierStub).ProcessorStatus.future(req)

        req = ipb.PeerSummaryRequest()
        peers = verifier.stub(ipb_grpc.VerifierStub).PeerSummary(req)

        processors = sorted(processors.result().processors, key=lambda el: el.name)
        peers = sorted(list(peers), key=lambda el: el.peeringdb_info.asn)

        return render_template('index.html',
                processors=processors, peers=peers)

    @app.route('/asn/<int:asn>')
    def view_asn(asn):
        # Detail page for a single peer AS.
        req = ipb.PeerDetailsRequest()
        req.asn = asn

        details = None
        try:
            details = verifier.stub(ipb_grpc.VerifierStub).PeerDetails(req)
        except grpc.RpcError as e:
            if e.code() == grpc.StatusCode.NOT_FOUND:
                return 'No such ASN.'
            else:
                return 'Internal server error.'

        return render_template('asn.html', details=details, asn=asn, check_info=check_info)

    @app.template_filter()
    def from_nano(v):
        # Template filter: nanoseconds since the UNIX epoch (as returned by
        # the verifier) -> arrow time object.
        return arrow.get(v/1e9)

    return app

View File

@ -0,0 +1,61 @@
import argparse
import logging
import multiprocessing
# gevent/grpc monkey-patching must run before anything else touches
# sockets or threads, hence its position among the imports.
from gevent import monkey
monkey.patch_all()

import grpc.experimental.gevent as grpc_gevent
grpc_gevent.init_gevent()
from werkzeug.contrib.fixers import ProxyFix
import gunicorn.app.base
from bgpwtf.cccampix.frontend.frontend import create_app
class StandaloneApplication(gunicorn.app.base.BaseApplication):
    """Gunicorn application wrapper running an in-process WSGI app.

    Based on the "custom application" example from the gunicorn
    documentation.
    """

    def __init__(self, app, options=None):
        # options: gunicorn settings dict (bind, workers, timeout, ...).
        self.options = options or {}
        self.application = app
        super(StandaloneApplication, self).__init__()

    def load_config(self):
        # Apply only options that gunicorn actually recognizes.
        # Fix: read self.options — the original read the module-level
        # 'options' global, which only worked by accident when this class
        # was used from this script's __main__ block and would raise
        # NameError anywhere else.
        config = dict(
            [
                (key, value)
                for key, value in self.options.items()
                if key in self.cfg.settings and value is not None
            ]
        )
        for key, value in config.items():
            self.cfg.set(key.lower(), value)

    def load(self):
        # Return the WSGI application for gunicorn workers to serve.
        return self.application
# Command line flags.
parser = argparse.ArgumentParser(description="Run the IXP frontend")
parser.add_argument("--flask_secret", required=True, help="Flask App Secret")
parser.add_argument("--workers", type=int, default=4, help="Number of gunicorn workers to run")
parser.add_argument("--listen", default="0.0.0.0:8080", help="Listen address")
parser.add_argument("--verifier", default="127.0.0.1:4200", help="Address of verifier service")

if __name__ == '__main__':
    args = parser.parse_args()
    # gunicorn server options, applied by StandaloneApplication.load_config.
    options = {
        "bind": args.listen,
        "workers": args.workers,
        "sendfile": False,
        "timeout": 240,
    }
    # Frontend application configuration (see create_app).
    config = {
        "verifier": args.verifier,
    }
    app = create_app(config)
    app.secret_key = args.flask_secret
    # Honor X-Forwarded-* headers set by the ingress.
    # NOTE(review): werkzeug.contrib.fixers was removed in werkzeug 1.0;
    # newer werkzeug provides werkzeug.middleware.proxy_fix.ProxyFix —
    # confirm the pinned werkzeug version before upgrading.
    app = ProxyFix(app)
    StandaloneApplication(app, options).run()

View File

@ -0,0 +1,106 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>AS{{ asn }} - CCCampIX Status Page</title>
<style type="text/css">
body {
background-color: #fff;
}
table, th, td {
background-color: #eee;
padding: 0.2em 0.4em 0.2em 0.4em;
}
.table th {
background-color: #c0c0c0;
}
.table {
background-color: #fff;
border-spacing: 0.2em;
}
</style>
</head>
<body>
<h1>AS{{ asn }} at CCCamp IX</h1>
<h2>Autonomous System</h2>
<p>
<b>ASN</b>: {{ asn }}<br />
<b>Network name</b>: {{ details.peeringdb_info.name }}<br />
</p>
<h2>Checks</h2>
<p>
Checks are prerequisites that need to pass for the ASN to be considered as part of the exchange.
</p>
<p>
<table class="table">
<tr>
<th>Name</th>
<th>Description</th>
<th>Status</th>
</tr>
{% for check in details.checks %}
<tr>
<td>{{ check_info.get(check.name)[0] }}</td>
<td>{{ check_info.get(check.name)[1] }}</td>
{% if check.status == 1 %}
<td style="background-color: lime">OK</td>
{% else %}
<td style="background-color: red">Failed: {{ check.msg }}</td>
{% endif %}
</tr>
{% endfor %}
</table>
</p>
<h2>AS{{ asn }} Routers</h2>
<p>
These are the routers that we'll be trying to reach from our Route Servers.
</p>
<p>
<table class="table">
<tr>
<th>IPv6 Address</th>
<th>Legacy Address</th>
</tr>
{% for router in details.peeringdb_info.routers %}
<tr>
{% if router.ipv6 %}
<td>{{ router.ipv6 }}</td>
{% else %}
<td><i>none</i></td>
{% endif %}
{% if router.ipv4 %}
<td>{{ router.ipv4 }}</td>
{% else %}
<td><i>none</i></td>
{% endif %}
</tr>
{% endfor %}
</table>
</p>
<h2>AS{{ asn }} Allowed Prefixes</h2>
<p>
Allowed prefixes pulled in from RPKI.
</p>
<p>
<table class="table">
<tr>
<th>Prefix</th>
<th>Max Length</th>
<th>Originating TA</th>
</tr>
{% for prefix in details.allowed_prefixes %}
<tr>
<td>{{ prefix.prefix }}</td>
<td>{{ prefix.max_length }}</td>
<td>{{ prefix.ta }}</td>
</tr>
{% else %}
<tr>
<td colspan="3"><i>no prefixes</i></td>
</tr>
{% endfor %}
</table>
</p>
</body>
</html>

View File

@ -0,0 +1,97 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>CCCampIX Status Page</title>
<style type="text/css">
body {
background-color: #fff;
}
table, th, td {
background-color: #eee;
padding: 0.2em 0.4em 0.2em 0.4em;
}
.table th {
background-color: #c0c0c0;
}
.table {
background-color: #fff;
border-spacing: 0.2em;
}
</style>
</head>
<body>
<h1>CCCamp IX Status</h1>
<p>
You can use this page to see how your CCCamp IX registration is progressing.
</p>
<h2>System Status</h2>
<p>
If any of the processors listed below are down, provisioning of new peers might be delayed.
</p>
<p>
<table class="table">
<tr>
<th>Processor</th>
<th>Status</th>
<th>Last Run</th>
<th>Next Run</th>
</tr>
{% for processor in processors %}
<tr>
<td>{{ processor.name }}</td>
{% if processor.status == 1 %}
<td style="background-color: lime;">OK</td>
{% else %}
<td style="background-color: red;">ERROR</td>
{% endif %}
<td>{{ (processor.last_run | from_nano).humanize() }}</td>
<td>{{ (processor.next_run | from_nano).humanize() }}</td>
</tr>
{% endfor %}
</table>
</p>
<h2>Participating Peers</h2>
<p>
If you don't see your ASN here, it means you didn't add yourself to our <a href="https://www.peeringdb.com/ix/2641">PeeringDB IX</a>.
</p>
<p>
If some checks are failing for your ASN, click on the ASN to navigate to its status page. There you can get more details about failure conditions, checks passed, and how to configure a session to the route servers and upstream router.
<p>
<table class="table">
<tr>
<th rowspan="2">ASN</th>
<th rowspan="2">Name</th>
<th rowspan="2">Checks Passed</th>
<th colspan="2">Routers</th>
</tr>
<tr>
<th>IPv6</th>
<th>Legacy</th>
</tr>
{% for peer in peers %}
{% set rowspan = peer.peeringdb_info.routers | length %}
<tr>
<td rowspan="{{rowspan}}"><a href="/asn/{{ peer.peeringdb_info.asn }}" name="as{{ peer.peeringdb_info.asn}}">{{ peer.peeringdb_info.asn }}</a></td>
<td rowspan="{{rowspan}}">{{ peer.peeringdb_info.name }}</td>
{% if peer.check_status == 1 %}
<td rowspan="{{rowspan}}" style="background-color: lime;">OK</td>
{% elif peer.check_status == 2 %}
<td rowspan="{{rowspan}}" style="background-color: red;">FAILED</td>
{% else %}
<td rowspan="{{rowspan}}" style="background-color: orange;">UNKNOWN</td>
{% endif %}
<td>{{ peer.peeringdb_info.routers[0].ipv6 or "none" }}</td>
<td>{{ peer.peeringdb_info.routers[0].ipv4 or "none" }}</td>
</tr>
{% for router in peer.peeringdb_info.routers[1:] %}
<tr>
<td>{{ router.ipv6 or "none" }}</td>
<td>{{ router.ipv4 or "none" }}</td>
</tr>
{% endfor %}
{% endfor %}
</table>
</p>
</body>
</html>

View File

@ -5,15 +5,41 @@ local kube = import "../../../kube/kube.libsonnet";
local ix = self,
local cfg = ix.cfg,
cfg:: {
image: "registry.k0.hswaw.net/bgpwtf/cccampix:1565559239-95928eecd7e35e8582fa011d1457643ca398c310",
domain: "ix-status.bgp.wtf",
octorpki: {
image: "registry.k0.hswaw.net/q3k/octorpki:1565367301-909a166cac3bdade6d5b8344fa590aa884114911",
image: "registry.k0.hswaw.net/bgpwtf/cccampix:1565469898-95928eecd7e35e8582fa011d1457643ca398c310",
storageClassName: "waw-hdd-redundant-2",
resources: {
requests: { cpu: "100m", memory: "500Mi" },
limits: { cpu: "500m", memory: "1Gi" },
requests: { cpu: "200m", memory: "1Gi" },
limits: { cpu: "1", memory: "2Gi" },
},
},
verifier: {
image: cfg.image,
db: {
host: "public.crdb-waw1.svc.cluster.local",
port: 26257,
username: "cccampix",
name: "cccampix",
tlsSecret: "client-cccampix-certificate",
},
},
irr: {
image: cfg.image,
},
peeringdb: {
image: cfg.image,
},
frontend: {
image: cfg.image,
},
appName: "ix",
namespace: error "namespace must be defined",
prefix: "",
@ -31,6 +57,11 @@ local kube = import "../../../kube/kube.libsonnet";
},
octorpki: {
address:: "%s.%s.svc.cluster.local:%d" % [
"octorpki",
ix.cfg.namespace,
8080,
],
cache: kube.PersistentVolumeClaim(ix.name("octorpki")) {
metadata+: ix.metadata("octorpki"),
spec+: {
@ -80,5 +111,131 @@ local kube = import "../../../kube/kube.libsonnet";
},
},
},
// component defines the common template for IX microservices: a
// Deployment running one container from cfg[name].image, a Service
// exposing component.port, and a cluster-local address for use by the
// other components.
component(name):: {
  local component = self,

  // args must be overridden with the container command line.
  args:: error "args must be set",
  name:: name,
  // Service/container port; gRPC services use the 4200 default, the
  // frontend overrides it to 8080 (HTTP).
  port:: 4200,
  // Optional pod volumes and container volumeMounts, keyed by name.
  volumes:: {},
  volumeMounts:: {},

  deployment: kube.Deployment(ix.name(name)) {
    metadata+: ix.metadata(name),
    spec+: {
      template+: {
        spec+: {
          volumes_: component.volumes,
          containers_: {
            [name]: kube.Container(ix.name(name)) {
              image: cfg[name].image,
              args: component.args,
              volumeMounts_: component.volumeMounts,
            },
          },
        },
      },
    },
  },
  svc: kube.Service(ix.name(name)) {
    metadata+: ix.metadata(name),
    target_pod:: component.deployment.spec.template,
    spec+: {
      ports: [
        { name: "client", port: component.port, targetPort: component.port, protocol: "TCP" },
      ],
    },
  },
  // Cluster-local DNS address of this component's service.
  address:: "%s.%s.svc.cluster.local:%d" % [
    component.name,
    ix.cfg.namespace,
    component.port,
  ],
},
// IRR proxy service.
irr: ix.component("irr") {
  args: [
    "/ix/irr",
    "-hspki_disable",
    "-listen_address=0.0.0.0:4200",
  ],
},
// PeeringDB proxy service.
peeringdb: ix.component("peeringdb") {
  args: [
    "/ix/peeringdb",
    "-hspki_disable",
    "-listen_address=0.0.0.0:4200",
  ],
},
// Verifier service: talks to CockroachDB over TLS using the client
// certificate mounted from cfg.verifier.db.tlsSecret.
verifier: ix.component("verifier") {
  volumes: {
    tls: {
      secret: {
        secretName: cfg.verifier.db.tlsSecret,
        defaultMode: kube.parseOctal("0400"),
      },
    },
  },
  volumeMounts: {
    tls: {
      mountPath: "/tls",
    },
  },
  args: [
    "/ix/verifier",
    "-hspki_disable",
    // DSN assembled from cfg.verifier.db plus the mounted certificates.
    "-dsn", "postgres://%s@%s:%d/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s" % [
      cfg.verifier.db.username,
      cfg.verifier.db.host,
      cfg.verifier.db.port,
      cfg.verifier.db.name,
      "/tls/ca.crt",
      "/tls/tls.crt",
      "/tls/tls.key",
    ],
    "-peeringdb=" + ix.peeringdb.address,
    "-irr=" + ix.irr.address,
    "-listen_address=0.0.0.0:4200",
    "-octorpki=" + ix.octorpki.address,
  ],
},
// Web frontend, served over HTTP on 8080 behind the ingress.
frontend: ix.component("frontend") {
  port: 8080,
  args: [
    "/ix/frontend.par",
    // NOTE(review): hardcoded Flask secret checked into the repository —
    // this should be sourced from a Kubernetes secret instead.
    "--flask_secret=dupa",
    "--listen=0.0.0.0:8080",
    "--verifier=" + ix.verifier.address,
  ],
},
// Public HTTPS ingress for the frontend, with an ACME (Let's Encrypt)
// certificate managed by cert-manager.
ingress: kube.Ingress("ingress") {
  metadata+: ix.metadata("public") {
    annotations+: {
      "kubernetes.io/tls-acme": "true",
      "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
      // Disable request body size limiting at the ingress.
      "nginx.ingress.kubernetes.io/proxy-body-size": "0",
    },
  },
  spec+: {
    tls: [
      { hosts: [cfg.domain], secretName: "public-tls"}
    ],
    rules: [
      {
        host: cfg.domain,
        http: {
          paths: [
            { path: "/", backend: ix.frontend.svc.name_port },
          ],
        },
      },
    ],
  },
},
},
}

View File

@ -7,29 +7,12 @@ container_layer(
"entrypoint.sh",
],
directory = "/octorpki/",
visibility = ["//bgpwtf/cccampix:__pkg__"],
)
container_layer(
name = "layer_tals",
files = glob(["tals/*"]),
directory = "/octorpki/tals/",
)
container_image(
name = "octorpki",
base = "@prodimage-bionic//image",
entrypoint = "/octorpki/entrypoint.sh",
layers = [
":layer_bin",
":layer_tals",
],
)
container_push(
name = "push",
image = ":octorpki",
format = "Docker",
registry = "registry.k0.hswaw.net",
repository = "q3k/octorpki",
tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
visibility = ["//bgpwtf/cccampix:__pkg__"],
)

View File

@ -1,5 +1,6 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("@build_stack_rules_proto//python:python_grpc_compile.bzl", "python_grpc_compile")
proto_library(
name = "ix_proto",
@ -21,3 +22,17 @@ go_library(
importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto",
visibility = ["//visibility:public"],
)
# Compile the IX proto into Python gRPC sources (stackb/rules_proto).
python_grpc_compile(
    name = "ix_py_proto_src",
    deps = [":ix_proto"],
)

# Python library wrapping the generated proto/gRPC sources.
py_library(
    name = "ix_py_proto",
    srcs = ["ix_py_proto_src",],
    visibility = ["//visibility:public"],
    deps = [
        "@pip36//protobuf",
    ],
)

View File

@ -20,19 +20,8 @@ message PeeringDBMember {
repeated Router routers = 3;
}
message GetIXMembersResponse {
message Member {
int64 asn = 1;
// Per PeeringDB, at least one of the following two address families
// will be set.
string ipv4 = 2;
string ipv6 = 3;
// AS/network name.
string name = 4;
};
repeated Member members = 1;
repeated PeeringDBMember members = 1;
}
service PeeringDBProxy {
@ -66,7 +55,7 @@ message IRRAttribute {
ImportExport export = 3;
}
}
message IRRQueryResponse {
enum Source {
SOURCE_INVALID = 0;
@ -82,6 +71,71 @@ service IRR {
rpc Query(IRRQueryRequest) returns (IRRQueryResponse);
}
// ProcessorStatusRequest requests the status of all verifier processors.
message ProcessorStatusRequest {
}

// ProcessorStatusResponse lists every registered processor together with
// its health and scheduling information.
message ProcessorStatusResponse {
    message Processor {
        enum Status {
            STATUS_INVALID = 0;
            STATUS_OK = 1;
            STATUS_ERROR = 2;
        }
        // Processor name, eg. "irr".
        string name = 1;
        Status status = 2;
        // Nanoseconds since UNIX epoch; 0 if the processor never ran.
        int64 last_run = 3;
        int64 next_run = 4;
    }
    repeated Processor processors = 1;
}

// PeerSummaryRequest requests a stream of per-peer summaries.
message PeerSummaryRequest {
}

// PeerSummaryResponse is one peer's summary within the response stream.
message PeerSummaryResponse {
    PeeringDBMember peeringdb_info = 1;
    enum Status {
        STATUS_INVALID = 0;
        STATUS_OK = 1;
        STATUS_FAILED = 2;
        STATUS_UNKNOWN = 3;
    }
    // Aggregate result of this peer's required checks.
    Status check_status = 2;
}

// PeerDetailsRequest requests full details for a single AS.
message PeerDetailsRequest {
    int64 asn = 1;
}

// PeerDetailsResponse carries full detail for one AS: check results,
// allowed prefixes and PeeringDB information.
message PeerDetailsResponse {
    message Check {
        enum Status {
            STATUS_INVALID = 0;
            STATUS_OK = 1;
            STATUS_FAILED = 2;
        };
        // Check name, eg. "irr".
        string name = 1;
        Status status = 2;
        // Nanoseconds since UNIX epoch at which the check was performed.
        int64 time = 3;
        // Human-readable failure message (empty on success).
        string msg = 4;
    };
    repeated Check checks = 1;

    message AllowedPrefix {
        string prefix = 1;
        int64 max_length = 2;
        // Trust anchor that authorized this prefix.
        string ta = 3;
    };
    repeated AllowedPrefix allowed_prefixes = 2;

    PeeringDBMember peeringdb_info = 3;
}

// Verifier runs periodic checks against IX peers and exposes their results.
service Verifier {
    rpc ProcessorStatus(ProcessorStatusRequest) returns (ProcessorStatusResponse);
    rpc PeerSummary(PeerSummaryRequest) returns (stream PeerSummaryResponse);
    rpc PeerDetails(PeerDetailsRequest) returns (PeerDetailsResponse);
}
message KeyInfoRequest {
// Public key fingerprint. 20 bytes.
bytes fingerprint = 1;
@ -137,4 +191,3 @@ service PGPEncryptor {
// Encrypt encrypts a given data blob with a given key from public keyserver infrastructure.
// If key doesn't exist, error (NotFound).
rpc Encrypt(stream EncryptRequest) returns (stream EncryptResponse);
}

View File

@ -0,0 +1,38 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
# Verifier service: runs peer checks (IRR, PeeringDB, RPKI, secretgen)
# and serves the Verifier gRPC API.
go_library(
    name = "go_default_library",
    srcs = [
        "main.go",
        "processor_irr.go",
        "processor_peeringdb.go",
        "processor_rpki.go",
        "processor_secretgen.go",
        "processors.go",
        "state.go",
        "statusz.go",
    ],
    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier",
    visibility = ["//visibility:private"],
    deps = [
        "//bgpwtf/cccampix/proto:go_default_library",
        "//bgpwtf/cccampix/verifier/model:go_default_library",
        "//go/mirko:go_default_library",
        "//go/pki:go_default_library",
        "//go/statusz:go_default_library",
        "@com_github_dustin_go_humanize//:go_default_library",
        "@com_github_golang_glog//:go_default_library",
        "@com_github_lib_pq//:go_default_library",
        "@com_github_sethvargo_go_password//password:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
        "@org_golang_x_net//trace:go_default_library",
    ],
)

go_binary(
    name = "verifier",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,319 @@
package main
import (
"context"
"flag"
"fmt"
"sync"
"time"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
"code.hackerspace.pl/hscloud/go/mirko"
"code.hackerspace.pl/hscloud/go/statusz"
"github.com/golang/glog"
"github.com/lib/pq"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// processorState tracks a single registered processor alongside its most
// recent execution result.
type processorState struct {
	name string
	p    processor

	// lastRun is the time of the last completed run, or nil if the
	// processor has never run.
	lastRun *time.Time
	// lastErr is the error returned by the last run (nil on success).
	lastErr error
}

// nextRun returns the time at which the processor should next run, or nil
// if it should run immediately (it has never run before).
func (p *processorState) nextRun() *time.Time {
	if p.lastRun == nil {
		return nil
	}
	nr := p.p.NextRun(*p.lastRun)
	return &nr
}
// service implements the Verifier gRPC service and owns the background
// processor scheduler.
type service struct {
	model model.Model

	// processors maps processor name to its state; guarded by processorsMu.
	processors   map[string]*processorState
	processorsMu sync.RWMutex

	// requiredChecks lists check names that must pass for a peer to be
	// reported as healthy.
	requiredChecks []string
}
// run is the scheduler main loop: once per second it gives every due
// processor a chance to run. Returns when ctx is canceled.
func (s *service) run(ctx context.Context) {
	t := time.NewTicker(time.Second)
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			// Tick received; fall through to runProcessors below.
			break
		}
		s.runProcessors(ctx)
	}
}
// runProcessors synchronously runs, in arbitrary order, every processor
// whose next scheduled run is in the past (or that never ran).
//
// NOTE(review): p.lastErr/p.lastRun are written here while holding
// processorsMu as a *read* lock, and ProcessorStatus reads those fields
// under the same read lock — confirm this write-under-RLock is not a data
// race.
func (s *service) runProcessors(ctx context.Context) {
	s.processorsMu.RLock()
	defer s.processorsMu.RUnlock()

	now := time.Now()

	for _, p := range s.processors {
		nr := p.nextRun()
		if nr == nil || nr.Before(now) {
			glog.Infof("Running processor %q...", p.name)
			// Each run gets its own trace, visible on /debug/requests.
			tr := trace.New(fmt.Sprintf("processor.%s", p.name), "Run")
			pctx := trace.NewContext(ctx, tr)
			err := p.p.RunAll(pctx, s.model)
			tr.LazyPrintf("Processor done: %v", err)
			tr.Finish()
			if err != nil {
				glog.Errorf("Running processor %q failed: %v", p.name, err)
			}
			p.lastErr = err
			p.lastRun = &now
		}
	}
}
// Command line flags (registered in main).
var (
	flagDSN       string // -dsn: PostgreSQL connection string
	flagPeeringDB string // -peeringdb: address of the peeringdb service
	flagIRR       string // -irr: address of the irr service
	flagOctoRPKI  string // -octorpki: address of the octorpki service
)
// main wires up and runs the verifier: it connects to the model database,
// runs schema migrations, registers all processors, starts the background
// scheduler and serves the Verifier gRPC API until shutdown.
func main() {
	// Fix: "PostrgreSQL" typo in the user-visible flag description.
	flag.StringVar(&flagDSN, "dsn", "", "PostgreSQL connection string")
	flag.StringVar(&flagPeeringDB, "peeringdb", "", "Address of peeringdb service")
	flag.StringVar(&flagIRR, "irr", "", "Address of irr service")
	flag.StringVar(&flagOctoRPKI, "octorpki", "", "Address of octorpki service")
	flag.Parse()

	// Picking an existing postgres-like driver for sqlx.BindType to work
	// See: https://github.com/jmoiron/sqlx/blob/ed7c52c43ee1e12a35efbcfea8dbae2d62a90370/bind.go#L24
	mirko.TraceSQL(&pq.Driver{}, "pgx")
	mi := mirko.New()

	m, err := model.Connect(mi.Context(), "pgx", flagDSN)
	if err != nil {
		glog.Exitf("Failed to create model: %v", err)
	}
	err = m.MigrateUp()
	if err != nil {
		glog.Exitf("Failed to migrate up: %v", err)
	}

	if err := mi.Listen(); err != nil {
		glog.Exitf("Listen failed: %v", err)
	}

	s := &service{
		model:          m,
		processors:     make(map[string]*processorState),
		requiredChecks: []string{"irr"},
	}

	// must panics when a processor constructor fails: every processor is
	// required for the verifier to function.
	must := func(p processor, err error) processor {
		if err != nil {
			panic(err)
		}
		return p
	}
	s.addProcessor(must(newPeeringDB(flagPeeringDB)))
	s.addProcessor(must(newIRR(flagIRR)))
	s.addProcessor(must(newSecretGen()))
	s.addProcessor(must(newRPKI(flagOctoRPKI)))

	statusz.AddStatusPart("Processors", processorsFragment, s.statuszProcessors)

	// Background scheduler; stops when the mirko context is canceled.
	go s.run(mi.Context())

	pb.RegisterVerifierServer(mi.GRPC(), s)
	if err := mi.Serve(); err != nil {
		glog.Exitf("Serve failed: %v", err)
	}

	<-mi.Done()
}
// addProcessor registers p under its name; panics on duplicate names.
func (s *service) addProcessor(p processor) {
	s.processorsMu.Lock()
	defer s.processorsMu.Unlock()

	n := p.Name()
	_, exists := s.processors[n]
	if exists {
		panic(fmt.Sprintf("duplicated processor %q", n))
	}

	st := &processorState{
		name:    n,
		p:       p,
		lastRun: nil,
	}
	s.processors[n] = st
}
// ProcessorStatus implements the Verifier.ProcessorStatus RPC, reporting
// per-processor health and scheduling information. Timestamps are in
// nanoseconds since the UNIX epoch; zero means the processor never ran.
func (s *service) ProcessorStatus(ctx context.Context, req *pb.ProcessorStatusRequest) (*pb.ProcessorStatusResponse, error) {
	s.processorsMu.RLock()
	defer s.processorsMu.RUnlock()

	res := &pb.ProcessorStatusResponse{
		Processors: make([]*pb.ProcessorStatusResponse_Processor, len(s.processors)),
	}

	i := 0
	for _, p := range s.processors {
		// Default to OK/never-ran, then fill in from recorded state.
		res.Processors[i] = &pb.ProcessorStatusResponse_Processor{
			Name:    p.name,
			Status:  pb.ProcessorStatusResponse_Processor_STATUS_OK,
			LastRun: 0,
			NextRun: 0,
		}
		if p.lastRun != nil {
			res.Processors[i].LastRun = p.lastRun.UnixNano()
			res.Processors[i].NextRun = p.p.NextRun(*p.lastRun).UnixNano()
		}
		if p.lastErr != nil {
			res.Processors[i].Status = pb.ProcessorStatusResponse_Processor_STATUS_ERROR
		}
		i += 1
	}

	return res, nil
}
// PeerSummary implements the Verifier.PeerSummary streaming RPC: it emits
// one PeerSummaryResponse per checkable peer, with an aggregate status
// derived from the peer's required check results.
func (s *service) PeerSummary(req *pb.PeerSummaryRequest, stream pb.Verifier_PeerSummaryServer) error {
	peers, err := s.model.GetCheckablePeers(stream.Context())
	if err != nil {
		glog.Errorf("model.GetCheckablePeers: %v", err)
		return status.Error(codes.Unavailable, "model error")
	}

	// Build the per-ASN responses, defaulting to STATUS_OK.
	asns := make([]int64, len(peers))
	asnToRes := make(map[int64]*pb.PeerSummaryResponse)

	for i, peer := range peers {
		routers := make([]*pb.PeeringDBMember_Router, len(peer.Routers))
		for i, router := range peer.Routers {
			routers[i] = &pb.PeeringDBMember_Router{}
			if router.V4 != nil {
				routers[i].Ipv4 = router.V4.String()
			}
			if router.V6 != nil {
				routers[i].Ipv6 = router.V6.String()
			}
		}
		p := &pb.PeeringDBMember{
			Asn:     peer.ASN,
			Name:    peer.Name,
			Routers: routers,
		}
		res := &pb.PeerSummaryResponse{
			PeeringdbInfo: p,
			CheckStatus:   pb.PeerSummaryResponse_STATUS_OK,
		}
		asnToRes[peer.ASN] = res
		asns[i] = peer.ASN
	}

	// Overlay check results: a failing (or missing) required check marks
	// the peer FAILED; if results cannot be fetched at all, every peer is
	// reported UNKNOWN.
	checkres, err := s.model.GetPeerCheckResults(stream.Context(), asns)
	if err != nil {
		glog.Errorf("GetPeerCheckResults(%v): %v", asns, err)
		for _, res := range asnToRes {
			res.CheckStatus = pb.PeerSummaryResponse_STATUS_UNKNOWN
		}
	} else {
		passedChecks := make(map[int64]map[string]bool)
		for _, c := range checkres {
			if _, ok := passedChecks[c.PeerASN]; !ok {
				passedChecks[c.PeerASN] = make(map[string]bool)
			}
			passedChecks[c.PeerASN][c.CheckName] = c.Status == model.PeerCheckStatus_Okay
		}
		// NOTE(review): ASNs with no rows in checkres never enter this
		// loop and therefore keep STATUS_OK — confirm that is intended
		// for peers that have never been checked.
		for asn, checks := range passedChecks {
			for _, required := range s.requiredChecks {
				if !checks[required] {
					asnToRes[asn].CheckStatus = pb.PeerSummaryResponse_STATUS_FAILED
					break
				}
			}
		}
	}

	for _, res := range asnToRes {
		if err := stream.Send(res); err != nil {
			return err
		}
	}
	return nil
}
// PeerDetails implements the Verifier.PeerDetails RPC: full per-AS detail
// (PeeringDB data, check results, RPKI-allowed prefixes). It returns
// InvalidArgument for a non-positive ASN, NotFound for an AS unknown to
// the IX, and Unavailable on model errors.
func (s *service) PeerDetails(ctx context.Context, req *pb.PeerDetailsRequest) (*pb.PeerDetailsResponse, error) {
	if req.Asn <= 0 {
		return nil, status.Error(codes.InvalidArgument, "asn must be set")
	}
	res := &pb.PeerDetailsResponse{}

	peeringdb, err := s.model.GetPeeringDBPeer(ctx, req.Asn)
	if err != nil {
		glog.Errorf("GetPeeringDBPeer(%v): %v", req.Asn, err)
		// Fix: this message previously read "could not get allowed
		// prefixes", copy-pasted from the prefix lookup below.
		return nil, status.Error(codes.Unavailable, "could not get peeringdb info")
	}

	// The model returns a zero-valued member for unknown ASNs.
	if peeringdb.Asn != req.Asn {
		return nil, status.Error(codes.NotFound, "no such ASN")
	}

	res.PeeringdbInfo = peeringdb

	checkres, err := s.model.GetPeerCheckResults(ctx, []int64{req.Asn})
	if err != nil {
		glog.Errorf("GetPeerCheckResults(%v): %v", req.Asn, err)
		return nil, status.Error(codes.Unavailable, "could not get check results")
	}

	res.Checks = make([]*pb.PeerDetailsResponse_Check, len(checkres))
	for i, check := range checkres {
		status := pb.PeerDetailsResponse_Check_STATUS_INVALID
		switch check.Status {
		case model.PeerCheckStatus_Okay:
			status = pb.PeerDetailsResponse_Check_STATUS_OK
		case model.PeerCheckStatus_SoftFailed:
			// Soft failures are surfaced to the user as OK.
			status = pb.PeerDetailsResponse_Check_STATUS_OK
		case model.PeerCheckStatus_Failed:
			status = pb.PeerDetailsResponse_Check_STATUS_FAILED
		}
		res.Checks[i] = &pb.PeerDetailsResponse_Check{
			Name:   check.CheckName,
			Status: status,
			Time:   check.Time.UnixNano(),
			Msg:    check.Message,
		}
	}

	prefixes, err := s.model.GetAllowedPrefixes(ctx, req.Asn)
	if err != nil {
		glog.Errorf("GetAllowedPrefixes(%v): %v", req.Asn, err)
		return nil, status.Error(codes.Unavailable, "could not get allowed prefixes")
	}
	res.AllowedPrefixes = make([]*pb.PeerDetailsResponse_AllowedPrefix, len(prefixes))
	for i, prefix := range prefixes {
		res.AllowedPrefixes[i] = &pb.PeerDetailsResponse_AllowedPrefix{
			Prefix:    prefix.Prefix.String(),
			MaxLength: prefix.MaxLength,
			Ta:        prefix.TA,
		}
	}

	return res, nil
}

View File

@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Model library: database access layer for the verifier (peers, routers,
# checks, allowed prefixes, session configs), backed by CockroachDB via
# sqlx and golang-migrate.
go_library(
    name = "go_default_library",
    srcs = [
        "allowed_prefixes.go",
        "checkable_peers.go",
        "config.go",
        "get_checks.go",
        "model.go",
        "peer_routers.go",
        "peers.go",
        "pgp.go",
        "schema.go",
        "submit_checks.go",
    ],
    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model",
    visibility = ["//visibility:public"],
    deps = [
        "//bgpwtf/cccampix/proto:go_default_library",
        "//bgpwtf/cccampix/verifier/model/migrations:go_default_library",
        "@com_github_golang_glog//:go_default_library",
        "@com_github_golang_migrate_migrate_v4//:go_default_library",
        "@com_github_golang_migrate_migrate_v4//database/cockroachdb:go_default_library",
        "@com_github_jmoiron_sqlx//:go_default_library",
        "@com_github_lib_pq//:go_default_library",
    ],
)

View File

@ -0,0 +1,87 @@
package model
import (
"context"
"database/sql"
"fmt"
"net"
"time"
)
// UpdateAllowedPrefixes replaces the stored set of RPKI-allowed prefixes
// for the peer with the given ASN, in one transaction: each given prefix
// is upserted with a fresh timestamp, then any row for that peer that was
// not refreshed (stale timestamp) is deleted.
func (s *sqlModel) UpdateAllowedPrefixes(ctx context.Context, asn int64, prefixes []*AllowedPrefix) error {
	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
	// Rollback is a no-op once Commit has succeeded.
	defer tx.Rollback()

	// One shared timestamp per run; it doubles as the marker that
	// distinguishes fresh rows from stale ones in the DELETE below.
	timestamp := time.Now().UnixNano()

	for _, prefix := range prefixes {
		q := `
			INSERT INTO allowed_prefixes
				(peer_id, timestamp, prefix, max_length, ta)
			SELECT
				peers.id, :timestamp, :prefix, :max_length, :ta
			FROM peers
			WHERE peers.asn = :asn
			ON CONFLICT (peer_id, prefix)
			DO UPDATE SET
				timestamp = :timestamp,
				max_length = :max_length,
				ta = :ta
		`
		ap := sqlAllowedPrefix{
			Timestamp: timestamp,
			Prefix:    prefix.Prefix.String(),
			MaxLength: prefix.MaxLength,
			TA:        prefix.TA,
			ASN:       fmt.Sprintf("%d", asn),
		}
		if _, err := tx.NamedExecContext(ctx, q, ap); err != nil {
			return fmt.Errorf("INSERT allowed_prefixes: %v", err)
		}
	}

	// Drop rows not refreshed above, ie. prefixes no longer allowed.
	q := `
		DELETE FROM allowed_prefixes
		WHERE timestamp != $1
		AND peer_id = (SELECT peers.id FROM peers WHERE peers.asn = $2)
	`
	if _, err := tx.ExecContext(ctx, q, timestamp, asn); err != nil {
		return fmt.Errorf("DELETE FROM allowed_prefixes: %v", err)
	}

	return tx.Commit()
}
// GetAllowedPrefixes returns the RPKI-allowed prefixes currently stored
// for the peer with the given ASN; an unknown ASN yields an empty slice.
func (s *sqlModel) GetAllowedPrefixes(ctx context.Context, asn int64) ([]*AllowedPrefix, error) {
	q := `
		SELECT
			allowed_prefixes.prefix,
			allowed_prefixes.max_length,
			allowed_prefixes.ta
		FROM
			allowed_prefixes
		LEFT JOIN peers
		ON peers.id = allowed_prefixes.peer_id
		WHERE peers.asn = $1
	`
	data := []sqlAllowedPrefix{}
	if err := s.db.SelectContext(ctx, &data, q, asn); err != nil {
		return nil, fmt.Errorf("SELECT allowed_prefixes: %v", err)
	}

	res := make([]*AllowedPrefix, len(data))
	for i, d := range data {
		// Prefixes are stored as CIDR strings; a parse failure indicates
		// corrupted database contents.
		_, prefix, err := net.ParseCIDR(d.Prefix)
		if err != nil {
			return nil, fmt.Errorf("corrupted CIDR in database: %v", err)
		}
		res[i] = &AllowedPrefix{
			Prefix:    *prefix,
			MaxLength: d.MaxLength,
			TA:        d.TA,
		}
	}
	return res, nil
}

View File

@ -0,0 +1,76 @@
package model
import (
"context"
"fmt"
"net"
"strconv"
)
// GetCheckablePeers returns all peers from the database together with
// their routers, as candidates for verification checks.
func (m *sqlModel) GetCheckablePeers(ctx context.Context) ([]*Peer, error) {
	// One row per (peer, router) pair; the LEFT JOIN also yields a row
	// with NULL router columns for peers that have no routers.
	data := []struct {
		sqlPeer       `db:"peers"`
		sqlPeerRouter `db:"peer_routers"`
	}{}
	q := `
		SELECT
			peers.id "peers.id",
			peers.asn "peers.asn",
			peers.name "peers.name",
			peer_routers.peer_id "peer_routers.peer_id",
			peer_routers.v6 "peer_routers.v6",
			peer_routers.v4 "peer_routers.v4"
		FROM peers
		LEFT JOIN peer_routers
		ON peer_routers.peer_id = peers.id
	`
	if err := m.db.SelectContext(ctx, &data, q); err != nil {
		return nil, fmt.Errorf("SELECT peers/peerRouters: %v", err)
	}

	// Collapse peers into map
	// ID -> Peer
	peers := make(map[string]*Peer)

	for _, row := range data {
		peer, ok := peers[row.sqlPeer.ID]
		if !ok {
			// ASNs are stored as strings; reject non-numeric values.
			asn, err := strconv.ParseInt(row.sqlPeer.ASN, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("data corruption: invalid ASN %q", row.sqlPeer.ASN)
			}
			peer = &Peer{
				ASN:     asn,
				Name:    row.sqlPeer.Name,
				Routers: []*Router{},
			}
			peers[row.sqlPeer.ID] = peer
		}

		var v6 net.IP
		var v4 net.IP
		if row.sqlPeerRouter.V6.Valid {
			v6 = net.ParseIP(row.sqlPeerRouter.V6.String)
		}
		if row.sqlPeerRouter.V4.Valid {
			v4 = net.ParseIP(row.sqlPeerRouter.V4.String)
		}
		// NOTE(review): for a peer with no routers, the NULL join row
		// still appends one Router with both addresses nil — confirm
		// callers tolerate such an empty router entry.
		peer.Routers = append(peer.Routers, &Router{
			V6: v6,
			V4: v4,
		})
	}

	res := make([]*Peer, len(peers))
	i := 0
	for _, peer := range peers {
		res[i] = peer
		i += 1
	}
	return res, nil
}

View File

@ -0,0 +1,51 @@
package model
import (
"context"
"database/sql"
"fmt"
)
// ConfigureMissingSessions creates a session_configs row for every peer
// router that does not yet have one, using gen to produce each new session's
// configuration (e.g. its BGP secret). The whole operation runs in a single
// transaction.
func (m *sqlModel) ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error {
	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
	// Rollback is a no-op once the transaction has been committed.
	defer tx.Rollback()

	// Find all routers without a corresponding session config.
	q := `
		SELECT
			peer_routers.peer_id "peer_id",
			peer_routers.id "id"
		FROM peer_routers
		WHERE peer_routers.id NOT IN (
			SELECT session_configs.peer_router_id
			FROM session_configs
		)
	`
	missing := []struct {
		PeerID string `db:"peer_id"`
		ID     string `db:"id"`
	}{}
	// BUGFIX: run the SELECT inside the transaction (was m.db.SelectContext),
	// so the set of missing routers is consistent with the inserts below.
	// Also renamed the loop variable, which shadowed the receiver m.
	if err := tx.SelectContext(ctx, &missing, q); err != nil {
		return fmt.Errorf("SELECT peerRouters: %v", err)
	}

	for _, router := range missing {
		config := gen()
		q = `
			INSERT INTO
				session_configs
				(peer_id, peer_router_id, bgp_secret)
			VALUES
				(:peer_id, :peer_router_id, :bgp_secret)
		`
		data := sqlSessionConfig{
			PeerID:       router.PeerID,
			PeerRouterID: router.ID,
			BGPSecret:    config.BGPSecret,
		}
		if _, err := tx.NamedExecContext(ctx, q, data); err != nil {
			return err
		}
	}
	return tx.Commit()
}

View File

@ -0,0 +1,70 @@
package model
import (
"context"
"fmt"
"strconv"
"time"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
)
// GetPeerCheckResults returns the stored check results for the given ASNs.
// Rows with an unrecognized stored status are reported as
// PeerCheckStatus_Invalid and logged. An empty ASN list yields an empty
// result.
func (s *sqlModel) GetPeerCheckResults(ctx context.Context, asn []int64) ([]*PeerCheckResult, error) {
	// BUGFIX: sqlx.In returns an error for an empty IN () argument list;
	// short-circuit instead of surfacing a confusing query error.
	if len(asn) == 0 {
		return []*PeerCheckResult{}, nil
	}

	// peers.asn is a STRING column, so format the ASNs as text. (Also
	// renamed the loop variable, which shadowed the asn parameter.)
	asns := make([]string, len(asn))
	for i, a := range asn {
		asns[i] = fmt.Sprintf("%d", a)
	}

	data := []struct {
		sqlPeer      `db:"peers"`
		sqlPeerCheck `db:"peer_checks"`
	}{}
	q := `
		SELECT
			peers.asn "peers.asn",
			peer_checks.check_name "peer_checks.check_name",
			peer_checks.check_time "peer_checks.check_time",
			peer_checks.check_status "peer_checks.check_status",
			peer_checks.check_message "peer_checks.check_message"
		FROM peers
		LEFT JOIN peer_checks
		ON peers.id = peer_checks.peer_id
		WHERE peers.asn IN (?)
	`
	// Expand the IN (?) clause and rebind to the driver's placeholder style.
	query, args, err := sqlx.In(q, asns)
	if err != nil {
		return nil, fmt.Errorf("SELECT peers: %v", err)
	}
	query = s.db.Rebind(query)
	if err := s.db.SelectContext(ctx, &data, query, args...); err != nil {
		return nil, fmt.Errorf("SELECT peers: %v", err)
	}

	res := make([]*PeerCheckResult, len(data))
	for i, d := range data {
		asn, err := strconv.ParseInt(d.sqlPeer.ASN, 10, 64)
		if err != nil {
			return nil, err
		}
		// Map the stored status string back to the enum.
		status := PeerCheckStatus_Invalid
		switch d.sqlPeerCheck.CheckStatus {
		case "okay":
			status = PeerCheckStatus_Okay
		case "failed":
			status = PeerCheckStatus_Failed
		default:
			glog.Errorf("Unhandled check status %q", d.sqlPeerCheck.CheckStatus)
		}
		res[i] = &PeerCheckResult{
			PeerASN:   asn,
			CheckName: d.sqlPeerCheck.CheckName,
			// check_time is stored as nanoseconds since the UNIX epoch.
			Time:    time.Unix(0, d.sqlPeerCheck.CheckTime),
			Status:  status,
			Message: d.sqlPeerCheck.CheckMessage,
		}
	}
	return res, nil
}

View File

@ -0,0 +1,2 @@
-- Revert the initial peers/peer_routers schema.
-- BUGFIX: peer_routers is interleaved in (and holds a foreign key to) peers,
-- so it must be dropped before its parent table; the previous order dropped
-- peers first and would fail on the dependency.
DROP TABLE peer_routers;
DROP TABLE peers;

View File

@ -0,0 +1,27 @@
-- Initial schema: peers and their routers, synced from PeeringDB or entered
-- manually.
CREATE TABLE peers (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- ASN stored as text; application code formats/parses it.
    asn STRING NOT NULL,
    name STRING NOT NULL,
    -- Provenance of this row; rows with source 'manual' are never touched by
    -- the PeeringDB sync.
    source STRING check (
        source = 'from-peeringdb' or
        source = 'manual'
    ) NOT NULL,
    UNIQUE (asn)
);

CREATE TABLE peer_routers (
    peer_id UUID NOT NULL,
    id UUID DEFAULT gen_random_uuid(),
    -- Router addresses as text; either may be NULL.
    v6 STRING,
    v4 STRING,
    source STRING check (
        source = 'from-peeringdb' or
        source = 'manual'
    ) NOT NULL,
    UNIQUE (v4),
    UNIQUE (v6),
    PRIMARY KEY (peer_id, id),
    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
) INTERLEAVE IN PARENT peers (peer_id);

View File

@ -0,0 +1 @@
DROP TABLE peer_checks;

View File

@ -0,0 +1,18 @@
-- Per-peer check results, one row per (peer, check name).
CREATE TABLE peer_checks (
    peer_id UUID NOT NULL,
    id UUID DEFAULT gen_random_uuid(),
    check_name STRING NOT NULL,
    -- Nanoseconds since the UNIX epoch (application convention).
    check_time INT NOT NULL,
    check_status STRING check (
        check_status = 'unknown' or
        check_status = 'okay' or
        check_status = 'failed'
    ) NOT NULL,
    check_message STRING NOT NULL,
    -- Mark-and-sweep flag used when re-submitting a full batch of results.
    delete BOOL NOT NULL,
    UNIQUE(peer_id, check_name),
    PRIMARY KEY (peer_id, id),
    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
) INTERLEAVE IN PARENT peers (peer_id);

View File

@ -0,0 +1,11 @@
-- PGP key fingerprint declared by each peer; at most one per peer (upserted).
CREATE TABLE peer_pgp_keys (
    peer_id UUID NOT NULL,
    id UUID DEFAULT gen_random_uuid(),
    fingerprint STRING NOT NULL,
    -- Nanoseconds since the UNIX epoch (application convention).
    time_created INT NOT NULL,
    UNIQUE (peer_id),
    PRIMARY KEY (peer_id, id),
    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
) INTERLEAVE IN PARENT peers (peer_id);

View File

@ -0,0 +1 @@
DROP TABLE session_configs;

View File

@ -0,0 +1,11 @@
-- Generated per-router session configuration; one row per peer router.
CREATE TABLE session_configs (
    peer_id UUID NOT NULL,
    peer_router_id UUID NOT NULL,
    id UUID DEFAULT gen_random_uuid(),
    bgp_secret STRING NOT NULL,
    UNIQUE (peer_router_id),
    PRIMARY KEY (peer_id, peer_router_id, id),
    CONSTRAINT fk_peer_router FOREIGN KEY (peer_id, peer_router_id) REFERENCES peer_routers ON DELETE CASCADE
) INTERLEAVE IN PARENT peer_routers (peer_id, peer_router_id);

View File

@ -0,0 +1 @@
DROP TABLE allowed_prefix;

View File

@ -0,0 +1,13 @@
-- RPKI-derived prefixes each peer is allowed to announce.
CREATE TABLE allowed_prefixes (
    peer_id UUID NOT NULL,
    id UUID DEFAULT gen_random_uuid(),
    -- Time of last refresh, in application-defined integer units.
    timestamp INT NOT NULL,
    -- CIDR prefix as text, e.g. "192.0.2.0/24".
    prefix STRING NOT NULL,
    max_length INT NOT NULL,
    -- Trust anchor the ROA came from.
    ta STRING NOT NULL,
    PRIMARY KEY (peer_id, id),
    UNIQUE (peer_id, prefix),
    CONSTRAINT fk_peer FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE
) INTERLEAVE IN PARENT peers (peer_id);

View File

@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//extras:embed_data.bzl", "go_embed_data")
# Embed all SQL migration files into the Go binary so migrations can run
# without filesystem access.
go_embed_data(
    name = "migrations_data",
    srcs = glob(["*.sql"]),
    package = "migrations",
    flatten = True,
)

go_library(
    name = "go_default_library",
    srcs = [
        "migrations.go",
        ":migrations_data",  # keep
    ],
    importpath = "code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model/migrations",
    visibility = ["//bgpwtf/cccampix/verifier/model:__subpackages__"],
    deps = [
        "//go/mirko:go_default_library",
        "@com_github_golang_migrate_migrate_v4//:go_default_library",
    ],
)

View File

@ -0,0 +1,17 @@
package migrations
import (
"fmt"
"code.hackerspace.pl/hscloud/go/mirko"
"github.com/golang-migrate/migrate/v4"
)
// New returns a migrate.Migrate for the verifier schema, reading the
// Bazel-embedded migration sources and targeting the database at dburl.
func New(dburl string) (*migrate.Migrate, error) {
	src, err := mirko.NewMigrationsFromBazel(Data)
	if err != nil {
		return nil, fmt.Errorf("could not create migrations: %v", err)
	}
	return migrate.NewWithSourceInstance("bazel", src, dburl)
}

View File

@ -0,0 +1,141 @@
package model
import (
"context"
"fmt"
"net"
"strings"
"time"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model/migrations"
migrate "github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/cockroachdb"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
)
// Model is the verifier's data access layer, backed by a SQL database.
type Model interface {
	// MigrateUp brings the database schema up to date.
	MigrateUp() error
	// RecordPeeringDBPeers reconciles the peers table with a PeeringDB dump.
	RecordPeeringDBPeers(ctx context.Context, members []*pb.PeeringDBMember) error
	// RecordPeeringDBPeerRouters reconciles peers' routers with a PeeringDB dump.
	RecordPeeringDBPeerRouters(ctx context.Context, members []*pb.PeeringDBMember) error
	// GetPeeringDBPeer returns one peer (with its routers) by ASN.
	GetPeeringDBPeer(ctx context.Context, asn int64) (*pb.PeeringDBMember, error)
	// GetCheckablePeers returns all peers that checks should run against.
	GetCheckablePeers(ctx context.Context) ([]*Peer, error)
	// SubmitPeerCheckResults replaces stored check results with this batch.
	SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error
	// GetPeerCheckResults returns stored check results for the given ASNs.
	GetPeerCheckResults(ctx context.Context, asn []int64) ([]*PeerCheckResult, error)
	// UpdatePGPKey upserts a peer's PGP key fingerprint.
	UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error
	// ConfigureMissingSessions generates configs for sessions lacking one.
	ConfigureMissingSessions(ctx context.Context, gen func() SessionConfig) error
	// UpdateAllowedPrefixes replaces a peer's allowed prefix list.
	UpdateAllowedPrefixes(ctx context.Context, asn int64, prefixes []*AllowedPrefix) error
	// GetAllowedPrefixes returns a peer's allowed prefix list.
	GetAllowedPrefixes(ctx context.Context, asn int64) ([]*AllowedPrefix, error)
}
// stringer is embedded into model types to give them a String()
// implementation.
//
// NOTE(review): the receiver is the embedded (empty) stringer itself, so
// this formats *s — an empty struct — and always yields "{}" rather than
// the fields of the embedding type; confirm whether that is intended.
type stringer struct {
}

func (s *stringer) String() string {
	if s == nil {
		return "<nil>"
	}
	return fmt.Sprintf("%+v", *s)
}
// Router is a peer's router, with its v6/v4 addresses (either may be nil).
type Router struct {
	stringer
	V6 net.IP
	V4 net.IP
}

// Peer is a checkable peer: its ASN, display name and routers.
type Peer struct {
	stringer
	ASN     int64
	Name    string
	Routers []*Router
}

// PeerCheckStatus is the outcome of a single check run against a peer.
type PeerCheckStatus int

const (
	// PeerCheckStatus_Invalid marks an unknown or corrupt stored status.
	PeerCheckStatus_Invalid PeerCheckStatus = iota
	// PeerCheckStatus_Okay means the check passed.
	PeerCheckStatus_Okay
	// PeerCheckStatus_Failed means the check definitively failed.
	PeerCheckStatus_Failed
	// PeerCheckStatus_SoftFailed marks a transient failure; such results
	// are skipped when persisting (see SubmitPeerCheckResults).
	PeerCheckStatus_SoftFailed
)

// PeerCheckResult is the result of one named check against one peer.
type PeerCheckResult struct {
	PeerASN   int64
	CheckName string
	Time      time.Time
	Status    PeerCheckStatus
	Message   string
}

// String implements fmt.Stringer.
func (p *PeerCheckResult) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("%+v", *p)
}

// PeerPGPKey is a peer's declared PGP key fingerprint.
type PeerPGPKey struct {
	stringer
	PeerASN     int64
	Fingerprint string
}

// SessionConfig is generated per-session configuration.
type SessionConfig struct {
	// BGPSecret is the shared secret for the BGP session.
	BGPSecret string
}

// AllowedPrefix is one RPKI-derived prefix a peer may announce.
type AllowedPrefix struct {
	Prefix    net.IPNet
	MaxLength int64
	TA        string
}

// String implements fmt.Stringer.
func (p *AllowedPrefix) String() string {
	if p == nil {
		return "<nil>"
	}
	return fmt.Sprintf("%+v", *p)
}

// sqlModel implements Model on top of an sqlx database handle.
type sqlModel struct {
	db *sqlx.DB
	// dsn is retained so MigrateUp can re-derive a migration URL from it.
	dsn string
}
// Connect opens a database connection for the given driver/DSN pair and
// returns a Model backed by it. The DSN is retained so MigrateUp can later
// derive a migration URL from it.
func Connect(ctx context.Context, driver, dsn string) (Model, error) {
	if dsn == "" {
		return nil, fmt.Errorf("dsn cannot be empty")
	}
	conn, err := sqlx.ConnectContext(ctx, driver, dsn)
	if err != nil {
		return nil, fmt.Errorf("could not connect to database: %v", err)
	}
	model := &sqlModel{
		db:  conn,
		dsn: dsn,
	}
	return model, nil
}
// MigrateUp applies all pending database migrations. An already up-to-date
// schema is not an error.
func (m *sqlModel) MigrateUp() error {
	// The migration library speaks the cockroach:// scheme, while the model
	// connects over the postgres:// wire protocol; rewrite the DSN.
	dsn := "cockroach://" + strings.TrimPrefix(m.dsn, "postgres://")
	mig, err := migrations.New(dsn)
	if err != nil {
		return err
	}
	if err := mig.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}

View File

@ -0,0 +1,128 @@
package model
import (
"context"
"database/sql"
"fmt"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
)
// RecordPeeringDBPeerRouters reconciles the peer_routers table with the
// router lists of the given PeeringDB members, in one transaction. Only rows
// whose source is 'from-peeringdb' are considered; manually-entered routers
// are left alone. Routers are never updated in place: a changed address pair
// is handled as a delete plus re-create.
func (m *sqlModel) RecordPeeringDBPeerRouters(ctx context.Context, members []*pb.PeeringDBMember) error {
	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	for _, member := range members {
		// Get existing routers for peer.
		q := `
			SELECT peer_routers.id, peer_routers.v4, peer_routers.v6
			FROM peer_routers
			LEFT JOIN peers ON (peer_routers.peer_id = peers.id)
			WHERE peer_routers.source = 'from-peeringdb'
			AND peers.asn = ?
		`
		q = tx.Rebind(q)
		existing := []sqlPeerRouter{}
		if err := tx.SelectContext(ctx, &existing, q, fmt.Sprintf("%d", member.Asn)); err != nil {
			return fmt.Errorf("SELECT peerRouters: %v", err)
		}

		// Find all routers that need to be deleted because they're outdated.
		// We do not attempt updates, only removals/recreations.
		// UUID -> bool
		toDelete := make(map[string]bool)
		for _, ex := range existing {
			// Try to find a requested router with same IP addresses.
			found := false
			for _, router := range member.Routers {
				if router.Ipv4 == ex.V4.String && router.Ipv6 == ex.V6.String {
					found = true
					break
				}
			}
			// Not found, mark for deletion.
			if !found {
				toDelete[ex.ID] = true
			}
		}

		// Find all routers that need to be created.
		toAdd := []sqlPeerRouter{}
		for _, router := range member.Routers {
			// Try to find an existing router with same IP addresses.
			found := false
			for _, ex := range existing {
				if router.Ipv4 == ex.V4.String && router.Ipv6 == ex.V6.String {
					found = true
					break
				}
			}
			// Not found, mark for creation.
			if !found {
				ta := sqlPeerRouter{
					Source: "from-peeringdb",
					ASN:    fmt.Sprintf("%d", member.Asn),
				}
				if router.Ipv6 != "" {
					ta.V6.String = router.Ipv6
					ta.V6.Valid = true
				}
				if router.Ipv4 != "" {
					ta.V4.String = router.Ipv4
					ta.V4.Valid = true
				}
				toAdd = append(toAdd, ta)
			}
		}

		// BUGFIX: log messages previously said "RecordPeeringDBPeers",
		// misattributing these actions to the sibling function.
		if len(toDelete) > 0 {
			glog.Infof("RecordPeeringDBPeerRouters: deleting %v", toDelete)
		}
		if len(toAdd) > 0 {
			glog.Infof("RecordPeeringDBPeerRouters: adding %+v", toAdd)
		}

		// Delete any routers, if needed.
		if len(toDelete) > 0 {
			// Get list of IDs to delete.
			deleteIds := make([]string, len(toDelete))
			i := 0
			for id := range toDelete {
				deleteIds[i] = id
				i += 1
			}
			// CONSISTENCY: table name spelled peer_routers (was
			// peer_Routers; unquoted identifiers are case-folded by the
			// database, but keep the spelling uniform with the package).
			query, args, err := sqlx.In("DELETE FROM peer_routers WHERE id IN (?)", deleteIds)
			if err != nil {
				return fmt.Errorf("DELETE peerRouters: %v", err)
			}
			query = tx.Rebind(query)
			_, err = tx.ExecContext(ctx, query, args...)
			if err != nil {
				return fmt.Errorf("DELETE peerRouters: %v", err)
			}
		}

		// Add any routers, if needed; INSERT ... SELECT resolves the peer's
		// UUID from its ASN.
		for _, ta := range toAdd {
			q := `
				INSERT INTO peer_routers
					(peer_id, v6, v4, source)
				SELECT
					peers.id, :v6, :v4, :source
				FROM
					peers
				WHERE peers.asn = :asn
			`
			if _, err := tx.NamedExecContext(ctx, q, ta); err != nil {
				return fmt.Errorf("INSERT peerRouters: %v", err)
			}
		}
	}
	return tx.Commit()
}

View File

@ -0,0 +1,199 @@
package model
import (
"context"
"database/sql"
"fmt"
"strconv"
"strings"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"github.com/golang/glog"
"github.com/jmoiron/sqlx"
)
// RecordPeeringDBPeers reconciles the peers table with the given PeeringDB
// member list inside one transaction: PeeringDB-sourced rows absent from the
// dump are deleted, new ASNs are inserted, and renamed peers are updated.
// Rows with source 'manual' are never modified or removed.
func (m *sqlModel) RecordPeeringDBPeers(ctx context.Context, members []*pb.PeeringDBMember) error {
	tx := m.db.MustBeginTx(ctx, &sql.TxOptions{})
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	// Desired state: ASN (as text) -> member.
	wanted := make(map[string]*pb.PeeringDBMember)
	for _, member := range members {
		wanted[fmt.Sprintf("%d", member.Asn)] = member
	}

	toDelete := make(map[string]bool)
	toAdd := make(map[string]bool)
	toUpdate := make(map[string]bool)

	existing := []sqlPeer{}
	existingMap := make(map[string]*sqlPeer)

	q := `
		SELECT peers.id, peers.asn, peers.name, peers.source
		FROM peers
	`
	if err := tx.SelectContext(ctx, &existing, q); err != nil {
		return fmt.Errorf("SELECT peers: %v", err)
	}

	// Mark ASs to delete and note existing ASs
	for _, ex := range existing {
		// Rebind the loop variable so the pointer stored below is
		// per-iteration, not the shared loop slot.
		ex := ex
		if wanted[ex.ASN] == nil && ex.Source == "from-peeringdb" {
			toDelete[ex.ASN] = true
		}
		existingMap[ex.ASN] = &ex
	}

	// Mark ASs to add
	for k, _ := range wanted {
		if existingMap[k] == nil {
			toAdd[k] = true
		}
	}

	// Mark ASs to update (only PeeringDB-sourced rows whose name changed).
	for k, wd := range wanted {
		if existingMap[k] == nil {
			continue
		}
		if existingMap[k].Source != "from-peeringdb" {
			continue
		}
		if wd.Name != existingMap[k].Name {
			toUpdate[k] = true
			continue
		}
	}

	if len(toAdd) > 0 {
		glog.Infof("RecordPeeringDBPeers: adding %v", toAdd)
	}
	if len(toDelete) > 0 {
		glog.Infof("RecordPeeringDBPeers: deleting %v", toDelete)
	}
	if len(toUpdate) > 0 {
		glog.Infof("RecordPeeringDBPeers: updating %v", toUpdate)
	}

	// Run INSERT to add new ASNs
	if len(toAdd) > 0 {
		q = `
			INSERT INTO peers
				(asn, name, source)
			VALUES
				(:asn, :name, :source)
		`
		add := make([]*sqlPeer, len(toAdd))
		i := 0
		for ta, _ := range toAdd {
			add[i] = &sqlPeer{
				ASN:    ta,
				Name:   wanted[ta].Name,
				Source: "from-peeringdb",
			}
			i += 1
		}
		if _, err := tx.NamedExecContext(ctx, q, add); err != nil {
			return fmt.Errorf("INSERT peers: %v", err)
		}
	}

	// Run DELETE to remove nonexistent ASNs
	if len(toDelete) > 0 {
		deleteIds := make([]string, len(toDelete))
		i := 0
		for td, _ := range toDelete {
			deleteIds[i] = existingMap[td].ID
			i += 1
		}
		// Expand the IN (?) clause and rebind for the driver.
		query, args, err := sqlx.In("DELETE FROM peers WHERE id IN (?)", deleteIds)
		if err != nil {
			return fmt.Errorf("DELETE peers: %v", err)
		}
		query = tx.Rebind(query)
		_, err = tx.ExecContext(ctx, query, args...)
		if err != nil {
			return fmt.Errorf("DELETE peers: %v", err)
		}
	}

	// Run UPDATE to update existing ASNs
	for k, _ := range toUpdate {
		want := wanted[k]
		got := existingMap[k]

		// Build the SET clause only from the fields that actually changed
		// (currently just the name).
		fields := []string{}
		args := []interface{}{}
		if want.Name != got.Name {
			fields = append(fields, "name = ?")
			args = append(args, want.Name)
		}
		q = fmt.Sprintf(`
			UPDATE peers
			SET
				%s
			WHERE
				id = ?
		`, strings.Join(fields, ",\n"))
		q = tx.Rebind(q)
		args = append(args, got.ID)
		_, err := tx.ExecContext(ctx, q, args...)
		if err != nil {
			return fmt.Errorf("UPDATE peers: %v", err)
		}
	}

	return tx.Commit()
}
// GetPeeringDBPeer returns the PeeringDB-sourced data (name and routers) for
// a single peer identified by its ASN. For an unknown ASN an empty
// PeeringDBMember (zero Asn, no routers) is returned rather than an error.
func (s *sqlModel) GetPeeringDBPeer(ctx context.Context, asn int64) (*pb.PeeringDBMember, error) {
	// One row per (peer, router) pair; sqlx maps the aliased columns onto
	// the embedded structs.
	data := []struct {
		sqlPeer       `db:"peers"`
		sqlPeerRouter `db:"peer_routers"`
	}{}
	q := `
		SELECT
			peers.id "peers.id",
			peers.asn "peers.asn",
			peers.name "peers.name",
			peer_routers.peer_id "peer_routers.peer_id",
			peer_routers.v6 "peer_routers.v6",
			peer_routers.v4 "peer_routers.v4"
		FROM peers
		LEFT JOIN peer_routers
		ON peer_routers.peer_id = peers.id
		WHERE peers.asn = $1
	`
	// BUGFIX: peers.asn is a STRING column; bind the ASN as text (as every
	// other query in this package does) instead of as an int64, which does
	// not match the column's type.
	if err := s.db.SelectContext(ctx, &data, q, fmt.Sprintf("%d", asn)); err != nil {
		return nil, fmt.Errorf("SELECT peers/peerRouters: %v", err)
	}

	res := &pb.PeeringDBMember{}
	for i, row := range data {
		// First row: fill in the per-peer fields and size the router list
		// (one entry per joined row).
		if res.Routers == nil {
			asn, err := strconv.ParseInt(row.sqlPeer.ASN, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("data corruption: invalid ASN %q", row.sqlPeer.ASN)
			}
			res.Asn = asn
			res.Name = row.sqlPeer.Name
			res.Routers = make([]*pb.PeeringDBMember_Router, len(data))
		}
		res.Routers[i] = &pb.PeeringDBMember_Router{}
		if row.sqlPeerRouter.V6.Valid {
			res.Routers[i].Ipv6 = row.sqlPeerRouter.V6.String
		}
		if row.sqlPeerRouter.V4.Valid {
			res.Routers[i].Ipv4 = row.sqlPeerRouter.V4.String
		}
	}
	return res, nil
}

View File

@ -0,0 +1,31 @@
package model
import (
"context"
"fmt"
"time"
)
// UpdatePGPKey upserts the PGP key fingerprint for the peer identified by
// key.PeerASN. time_created is set to the time of this call, including when
// an existing row is updated.
func (s *sqlModel) UpdatePGPKey(ctx context.Context, key *PeerPGPKey) error {
	// INSERT ... SELECT resolves the peer's UUID from its ASN; the UNIQUE
	// (peer_id) constraint turns repeat submissions into updates.
	q := `
		INSERT INTO peer_pgp_keys
			(peer_id, fingerprint, time_created)
		SELECT
			peers.id, :fingerprint, :time_created
		FROM peers
		WHERE peers.asn = :asn
		ON CONFLICT (peer_id)
		DO UPDATE SET
			fingerprint = :fingerprint,
			time_created = :time_created
	`
	data := &sqlPeerPGPKey{
		Fingerprint: key.Fingerprint,
		ASN:         fmt.Sprintf("%d", key.PeerASN),
		TimeCreated: time.Now().UnixNano(),
	}
	if _, err := s.db.NamedExecContext(ctx, q, data); err != nil {
		return fmt.Errorf("INSERT peer_pgp_keys: %v", err)
	}
	return nil
}

View File

@ -0,0 +1,65 @@
package model
import "database/sql"
// sqlPeer mirrors a row of the peers table.
type sqlPeer struct {
	ID string `db:"id"`
	// ASN is stored as text in the database.
	ASN  string `db:"asn"`
	Name string `db:"name"`
	// Source is 'from-peeringdb' or 'manual'.
	Source string `db:"source"`
}

// sqlPeerRouter mirrors a row of the peer_routers table.
type sqlPeerRouter struct {
	ID     string         `db:"id"`
	PeerID string         `db:"peer_id"`
	V6     sql.NullString `db:"v6"`
	V4     sql.NullString `db:"v4"`
	Source string         `db:"source"`

	// Fake, used by app logic.
	ASN string `db:"asn"`
}

// sqlPeerCheck mirrors a row of the peer_checks table.
type sqlPeerCheck struct {
	ID        string `db:"id"`
	PeerID    string `db:"peer_id"`
	CheckName string `db:"check_name"`
	// CheckTime is nanoseconds since the UNIX epoch.
	CheckTime    int64  `db:"check_time"`
	CheckStatus  string `db:"check_status"`
	CheckMessage string `db:"check_message"`
	// Delete is the mark-and-sweep flag used by SubmitPeerCheckResults.
	Delete bool `db:"delete"`

	// Fake, used by app logic.
	ASN string `db:"asn"`
}

// sqlPeerPGPKey mirrors a row of the peer_pgp_keys table.
type sqlPeerPGPKey struct {
	ID          string `db:"id"`
	PeerID      string `db:"peer_id"`
	Fingerprint string `db:"fingerprint"`
	TimeCreated int64  `db:"time_created"`

	// Fake, used by app logic.
	ASN string `db:"asn"`
}

// sqlSessionConfig mirrors a row of the session_configs table.
type sqlSessionConfig struct {
	ID           string `db:"id"`
	PeerID       string `db:"peer_id"`
	PeerRouterID string `db:"peer_router_id"`
	BGPSecret    string `db:"bgp_secret"`
}

// sqlAllowedPrefix mirrors a row of the allowed_prefixes table.
type sqlAllowedPrefix struct {
	ID        string `db:"id"`
	PeerID    string `db:"peer_id"`
	Timestamp int64  `db:"timestamp"`
	Prefix    string `db:"prefix"`
	MaxLength int64  `db:"max_length"`
	TA        string `db:"ta"`

	// Fake, used by app logic.
	ASN string `db:"asn"`
}

View File

@ -0,0 +1,73 @@
package model
import (
"context"
"database/sql"
"fmt"
"github.com/golang/glog"
)
// SubmitPeerCheckResults stores the given check results, overwriting the
// previous result for each (peer, check name) pair and pruning results that
// were not re-submitted in this batch.
//
// It uses a mark-and-sweep strategy inside one transaction: all existing
// rows are marked for deletion, re-submitted results are upserted with the
// mark cleared, and any row still marked at the end is deleted. Soft
// failures are skipped entirely (removing the previous result, if any).
func (s *sqlModel) SubmitPeerCheckResults(ctx context.Context, res []*PeerCheckResult) error {
	tx := s.db.MustBeginTx(ctx, &sql.TxOptions{})
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	// Phase 1: mark all existing check results for deletion.
	q := `
		UPDATE peer_checks
		SET delete = true
	`
	if _, err := tx.ExecContext(ctx, q); err != nil {
		return fmt.Errorf("UPDATE for deletion peer_checks: %v", err)
	}

	seenASNs := make(map[int64]bool)

	// Phase 2: upsert the submitted results, clearing their deletion mark.
	for _, pcr := range res {
		seenASNs[pcr.PeerASN] = true
		q = `
			INSERT INTO peer_checks
				(peer_id, check_name, check_time, check_status, check_message, delete)
			SELECT
				peers.id, :check_name, :check_time, :check_status, :check_message, false
			FROM peers
			WHERE peers.asn = :asn
			ON CONFLICT (peer_id, check_name)
			DO UPDATE SET
				check_time = :check_time,
				check_status = :check_status,
				check_message = :check_message,
				delete = false
		`
		// BUGFIX: was "uknown", which violates the check_status CHECK
		// constraint ('unknown'/'okay'/'failed') whenever a result with an
		// unmapped status was inserted.
		status := "unknown"
		switch pcr.Status {
		case PeerCheckStatus_Okay:
			status = "okay"
		case PeerCheckStatus_Failed:
			status = "failed"
		case PeerCheckStatus_SoftFailed:
			// Soft failures are deliberately not persisted.
			glog.Infof("Skipping soft failure: %+v", pcr)
			continue
		}
		cr := sqlPeerCheck{
			CheckName:    pcr.CheckName,
			CheckTime:    pcr.Time.UnixNano(),
			CheckStatus:  status,
			CheckMessage: pcr.Message,
			ASN:          fmt.Sprintf("%d", pcr.PeerASN),
		}
		if _, err := tx.NamedExecContext(ctx, q, cr); err != nil {
			return fmt.Errorf("INSERT peer_checks: %v", err)
		}
	}

	// Phase 3: sweep results that were not re-submitted.
	q = `
		DELETE FROM peer_checks
		WHERE delete = true
	`
	if _, err := tx.ExecContext(ctx, q); err != nil {
		return fmt.Errorf("DELETE FROM peer_checks: %v", err)
	}
	return tx.Commit()
}

View File

@ -0,0 +1,247 @@
package main
import (
"context"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
"code.hackerspace.pl/hscloud/go/pki"
"github.com/golang/glog"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
)
const (
	// RS_ASN is the route server's AS number, as expected in peers' IRR
	// import/export attributes.
	RS_ASN = "AS208521"
	// RS_ASSET is the AS-SET peers must accept from the route server.
	RS_ASSET = "AS-CCCAMP19-IX"
)

// irr is a processor that checks every peer's IRR records: it requires a
// correct import/export policy against the route server and extracts the
// peer's declared PGP key fingerprint from remarks.
type irr struct {
	irrc pb.IRRClient
}

// newIRR connects to the IRR gRPC service at addr (using HSPKI client
// credentials) and returns the processor.
func newIRR(addr string) (processor, error) {
	conn, err := grpc.Dial(addr, pki.WithClientHSPKI())
	if err != nil {
		return nil, fmt.Errorf("could not connect to irr service: %v", err)
	}
	return &irr{
		irrc: pb.NewIRRClient(conn),
	}, nil
}

// Name implements processor.
func (i *irr) Name() string {
	return "IRR"
}

// NextRun implements processor; the IRR check runs every 5 minutes.
func (i *irr) NextRun(now time.Time) time.Time {
	return now.Add(5 * time.Minute)
}
// RunAll checks the IRR records of every checkable peer concurrently (at
// most 10 in-flight queries). For each peer it records one "irr" check
// result and, when present, the PGP fingerprint declared in its remarks.
// IRR service unavailability yields a soft failure (not persisted), while
// missing or incorrect records yield a hard failure.
func (i *irr) RunAll(ctx context.Context, m model.Model) error {
	peers, err := m.GetCheckablePeers(ctx)
	if err != nil {
		return fmt.Errorf("could not retrieve peers: %v", err)
	}

	// Collector goroutines: results and PGP keys are streamed over channels
	// and appended by a single goroutine each, so no mutex is needed.
	results := make(chan *model.PeerCheckResult)
	pcr := []*model.PeerCheckResult{}
	pcrDone := make(chan struct{})

	pgpKeys := make(chan *model.PeerPGPKey)
	pk := []*model.PeerPGPKey{}
	pkDone := make(chan struct{})

	go func() {
		for res := range results {
			pcr = append(pcr, res)
		}
		pcrDone <- struct{}{}
	}()
	go func() {
		for res := range pgpKeys {
			pk = append(pk, res)
		}
		pkDone <- struct{}{}
	}()

	// fail records a failed check result; hard selects between a hard
	// failure (persisted) and a soft one (skipped when submitting).
	fail := func(p *model.Peer, hard bool, f string, args ...interface{}) {
		status := model.PeerCheckStatus_SoftFailed
		if hard {
			status = model.PeerCheckStatus_Failed
		}
		results <- &model.PeerCheckResult{
			PeerASN:   p.ASN,
			CheckName: "irr",
			Time:      time.Now(),
			Status:    status,
			Message:   fmt.Sprintf(f, args...),
		}
	}

	var wg sync.WaitGroup
	wg.Add(len(peers))

	// Semaphore bounding concurrent IRR queries to 10.
	sem := make(chan struct{}, 10)

	for _, peer := range peers {
		go func(p *model.Peer) {
			sem <- struct{}{}
			defer func() {
				<-sem
				wg.Done()
			}()

			req := &pb.IRRQueryRequest{
				As: fmt.Sprintf("%d", p.ASN),
			}
			res, err := i.irrc.Query(ctx, req)
			if err != nil {
				// Map gRPC status codes onto hard/soft failures.
				s, ok := status.FromError(err)
				switch {
				case ok && s.Code() == codes.NotFound:
					fail(p, true, "ASN %d not found in IRR", p.ASN)
				case ok && s.Code() == codes.Unimplemented:
					fail(p, true, "ASN %d belongs to an unknown IRR/RIR", p.ASN)
				case ok && s.Code() == codes.Unavailable:
					fail(p, false, "could not contact IRR")
				default:
					glog.Errorf("IRR.Query(%d): %v", p.ASN, err)
					fail(p, false, "unhandled IRR error")
				}
				return
			}

			// Scan the object's attributes for the three required entries.
			importOkay := false
			exportOkay := false
			pgpKey := ""
			for _, attr := range res.Attributes {
				switch value := attr.Value.(type) {
				case *pb.IRRAttribute_Remarks:
					if ok, key := i.checkRemarks(value.Remarks); ok {
						pgpKey = key
					}
				case *pb.IRRAttribute_Import:
					if i.checkImport(value.Import) {
						importOkay = true
					}
				case *pb.IRRAttribute_Export:
					if i.checkExport(value.Export, p.ASN) {
						exportOkay = true
					}
				}
			}

			switch {
			case !importOkay:
				fail(p, true, "no `import: from %s accept %s` entry", RS_ASN, RS_ASSET)
				return
			case !exportOkay:
				fail(p, true, "no `export: to %s announce AS%d` entry", RS_ASN, p.ASN)
				return
			case pgpKey == "":
				fail(p, true, "no `remarks: CCCAMP19-IX PGP: <...>` entry")
				return
			}

			pgpKeys <- &model.PeerPGPKey{
				PeerASN:     p.ASN,
				Fingerprint: pgpKey,
			}
			results <- &model.PeerCheckResult{
				PeerASN:   p.ASN,
				CheckName: "irr",
				Time:      time.Now(),
				Status:    model.PeerCheckStatus_Okay,
				Message:   "",
			}
		}(peer)
	}

	// Wait for all workers, then close the channels and wait for the
	// collectors to drain them.
	wg.Wait()
	close(results)
	close(pgpKeys)
	<-pcrDone
	<-pkDone

	err = m.SubmitPeerCheckResults(ctx, pcr)
	if err != nil {
		return err
	}
	for _, k := range pk {
		err = m.UpdatePGPKey(ctx, k)
		if err != nil {
			return err
		}
	}
	return nil
}
// checkRemarks parses a `remarks:` attribute looking for a declaration of
// the form `CCCAMP19-IX PGP: <40 hex digits>` (case-insensitive, internal
// spaces/tabs ignored). It reports whether a valid fingerprint was found
// and returns the fingerprint in lowercase with whitespace stripped.
func (i *irr) checkRemarks(remarks string) (bool, string) {
	const label = "cccamp19-ix pgp:"
	normalized := strings.ToLower(strings.TrimSpace(remarks))
	if !strings.HasPrefix(normalized, label) {
		return false, ""
	}
	fp := strings.TrimSpace(strings.TrimPrefix(normalized, label))
	fp = strings.NewReplacer(" ", "", "\t", "").Replace(fp)
	// A PGP v4 fingerprint is exactly 40 hex digits.
	if len(fp) != 40 {
		return false, ""
	}
	if _, err := hex.DecodeString(fp); err != nil {
		return false, ""
	}
	return true, fp
}
// checkImport reports whether the given `import:` attribute matches the
// required policy: plain BGP (or unspecified protocol), accepting RS_ASSET,
// with at least one peering expression against RS_ASN.
func (i *irr) checkImport(imp *pb.IRRAttribute_ImportExport) bool {
	if proto := imp.ProtocolFrom; proto != "" && strings.ToLower(proto) != "bgp" {
		return false
	}
	if strings.ToUpper(imp.Filter) != RS_ASSET {
		return false
	}
	for _, expr := range imp.Expressions {
		if strings.ToUpper(expr.Peering) == RS_ASN {
			return true
		}
	}
	return false
}
// checkExport reports whether the given `export:` attribute matches the
// required policy: plain BGP (or unspecified protocol), announcing exactly
// AS<asn>, with at least one peering expression against RS_ASN.
func (i *irr) checkExport(exp *pb.IRRAttribute_ImportExport, asn int64) bool {
	if proto := exp.ProtocolInto; proto != "" && strings.ToLower(proto) != "bgp" {
		return false
	}
	if want := fmt.Sprintf("AS%d", asn); strings.ToUpper(exp.Filter) != want {
		return false
	}
	for _, expr := range exp.Expressions {
		if strings.ToUpper(expr.Peering) == RS_ASN {
			return true
		}
	}
	return false
}

View File

@ -0,0 +1,58 @@
package main
import (
"context"
"fmt"
"time"
"code.hackerspace.pl/hscloud/go/pki"
"google.golang.org/grpc"
pb "code.hackerspace.pl/hscloud/bgpwtf/cccampix/proto"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
)
// peeringDB is a processor that syncs the IX member list (peers and their
// routers) from the PeeringDB proxy service into the local database.
type peeringDB struct {
	pdb pb.PeeringDBProxyClient
}

// newPeeringDB connects to the PeeringDB proxy gRPC service at addr (using
// HSPKI client credentials) and returns the processor.
func newPeeringDB(addr string) (processor, error) {
	conn, err := grpc.Dial(addr, pki.WithClientHSPKI())
	if err != nil {
		return nil, fmt.Errorf("could not connect to peeringdb service: %v", err)
	}
	return &peeringDB{
		pdb: pb.NewPeeringDBProxyClient(conn),
	}, nil
}

// Name implements processor.
func (p *peeringDB) Name() string {
	return "PeeringDB"
}

// NextRun implements processor; PeeringDB is re-synced every 5 minutes.
func (p *peeringDB) NextRun(now time.Time) time.Time {
	return now.Add(5 * time.Minute)
}
// RunAll fetches the current member list of IX 2641 from the PeeringDB
// proxy and records both the peers and their routers in the model.
func (p *peeringDB) RunAll(ctx context.Context, m model.Model) error {
	// PeeringDB identifier of the exchange being synced.
	id := int64(2641)
	res, err := p.pdb.GetIXMembers(ctx, &pb.GetIXMembersRequest{
		Id: id,
	})
	if err != nil {
		return fmt.Errorf("GetIXMembers(%d): %v", id, err)
	}
	if err := m.RecordPeeringDBPeers(ctx, res.Members); err != nil {
		return fmt.Errorf("RecordPeeringDBPeers: %v", err)
	}
	if err := m.RecordPeeringDBPeerRouters(ctx, res.Members); err != nil {
		return fmt.Errorf("RecordPeeringDBPeerRouters: %v", err)
	}
	return nil
}

View File

@ -0,0 +1,130 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
"github.com/golang/glog"
)
// rpki is a processor that pulls the validated ROA set from an OctoRPKI
// instance and stores, per checkable peer, the prefixes that peer is
// allowed to announce.
type rpki struct {
	// octorpki is the host:port of the OctoRPKI HTTP endpoint.
	octorpki string
}

// newRPKI returns an rpki processor talking to the given OctoRPKI address.
func newRPKI(octorpki string) (processor, error) {
	return &rpki{
		octorpki: octorpki,
	}, nil
}

// Name implements processor.
func (p *rpki) Name() string {
	return "RPKI"
}

// NextRun implements processor; ROA data is refreshed every minute.
func (p *rpki) NextRun(now time.Time) time.Time {
	return now.Add(1 * time.Minute)
}
type octorpkiRes struct {
Metadata struct {
Counts int64 `json:"counts"`
Generated int64 `json:"counts"`
Valid int64 `json:"counts"`
} `json:"metadata"`
ROAs []octorpkiROA `json:"roas"`
}
type octorpkiROA struct {
Prefix string `json:"prefix"`
MaxLength int64 `json:"maxLength"`
ASN string `json:"asn"`
TA string `json:"ta"`
}
// RunAll downloads the full ROA dump from OctoRPKI, filters it down to the
// ASNs of currently checkable peers, and records each such peer's allowed
// prefixes in the model. Malformed entries are logged and skipped.
func (p *rpki) RunAll(ctx context.Context, m model.Model) error {
	peers, err := m.GetCheckablePeers(ctx)
	if err != nil {
		return err
	}
	// Set of "AS<n>" strings we care about.
	wantASNs := make(map[string]bool)
	for _, peer := range peers {
		wantASNs[fmt.Sprintf("AS%d", peer.ASN)] = true
	}

	// Get RPKI data dump from OctoRPKI.
	url := fmt.Sprintf("http://%s/output.json", p.octorpki)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return fmt.Errorf("NewRequest(GET %q): %v", url, err)
	}
	req = req.WithContext(ctx)
	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("GET %q: %v", url, err)
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("GET %q: %v", url, err)
	}
	// OctoRPKI serves this plain-text sentinel until its first validation
	// pass completes.
	if strings.HasPrefix(string(data), "File not ready yet") {
		return fmt.Errorf("OctoRPKI not yet ready")
	}

	var res octorpkiRes
	if err := json.Unmarshal(data, &res); err != nil {
		return fmt.Errorf("Could not decode OctoRPKI output: %v", err)
	}

	// Make list of prefixes we should honor.
	prefixes := make(map[int64][]*model.AllowedPrefix)
	for _, roa := range res.ROAs {
		if !wantASNs[strings.ToUpper(roa.ASN)] {
			continue
		}
		// Strip the "AS" prefix and parse the numeric part.
		asn, err := strconv.ParseInt(roa.ASN[2:], 10, 64)
		if err != nil {
			glog.Errorf("Invalid ASN: %s %q", roa.ASN, roa.ASN)
			continue
		}
		if _, ok := prefixes[asn]; !ok {
			prefixes[asn] = []*model.AllowedPrefix{}
		}
		_, prefix, err := net.ParseCIDR(roa.Prefix)
		if err != nil {
			glog.Errorf("Invalid prefix: %s %q", roa.ASN, roa.Prefix)
			continue
		}
		prefixes[asn] = append(prefixes[asn], &model.AllowedPrefix{
			Prefix:    *prefix,
			MaxLength: roa.MaxLength,
			TA:        roa.TA,
		})
	}

	for asn, p := range prefixes {
		err := m.UpdateAllowedPrefixes(ctx, asn, p)
		if err != nil {
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,35 @@
package main
import (
"context"
"time"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
"github.com/sethvargo/go-password/password"
)
// secretGen is a processor that fills in session configuration (currently a
// generated BGP secret) for any peer router that does not yet have one.
type secretGen struct {
}

// newSecretGen returns a secretGen processor; it needs no external services.
func newSecretGen() (processor, error) {
	return &secretGen{}, nil
}

// Name implements processor.
func (p *secretGen) Name() string {
	return "SecretGen"
}

// NextRun implements processor; missing secrets are topped up every minute.
func (p *secretGen) NextRun(now time.Time) time.Time {
	return now.Add(1 * time.Minute)
}

// gen builds a fresh SessionConfig with a random 16-character secret
// (4 digits, no symbols, mixed case, repeated characters allowed).
func gen() model.SessionConfig {
	secret := password.MustGenerate(16, 4, 0, false, true)
	return model.SessionConfig{
		BGPSecret: secret,
	}
}

// RunAll implements processor by delegating to the model, which inserts a
// generated config for every session that is missing one.
func (p *secretGen) RunAll(ctx context.Context, m model.Model) error {
	return m.ConfigureMissingSessions(ctx, gen)
}

View File

@ -0,0 +1,15 @@
package main
import (
"context"
"time"
"code.hackerspace.pl/hscloud/bgpwtf/cccampix/verifier/model"
)
// processor is a periodic job run by the verifier: one full pass of some
// piece of work (PeeringDB sync, IRR checks, ...) against the model.
type processor interface {
	// Name returns a human-readable name for status pages and logs.
	Name() string
	// NextRun, given the current time, returns when RunAll should next be
	// invoked.
	NextRun(time.Time) time.Time
	// RunAll performs one full pass of the processor's work against m.
	RunAll(ctx context.Context, m model.Model) error
}

View File

@ -0,0 +1,4 @@
package main
// state is a placeholder for the verifier's mutable state; currently empty.
type state struct {
}

View File

@ -0,0 +1,94 @@
package main
import (
"context"
"fmt"
"sort"
humanize "github.com/dustin/go-humanize"
)
// processorsFragment is the html/template fragment rendered into the statusz
// page: one table row per registered processor with its status and
// scheduling info. It consumes the struct built by statuszProcessors.
const processorsFragment = `
	<style type="text/css">
		.table td,th {
			background-color: #eee;
			padding: 0.2em 0.4em 0.2em 0.4em;
		}
		.table th {
			background-color: #c0c0c0;
		}
		.table {
			background-color: #fff;
			border-spacing: 0.2em;
			margin-left: auto;
			margin-right: auto;
		}
	</style>
	<div>
		<table class="table">
			<tr>
				<th>Name</th>
				<th>Status</th>
				<th>Last Run</th>
				<th>Next Run</th>
			</tr>
			{{range .Processors }}
			<tr>
				<td>{{ .Name }}</td>
				{{ if ne .Status "OK" }}
				<td style="background-color: #ff3030;">{{ .Status }}</td>
				{{ else }}
				<td>{{ .Status }}</td>
				{{ end }}
				<td>{{ .LastRun }}</td>
				<td>{{ .NextRun }}</td>
			</tr>
			{{end}}
		</table>
	</div>
`

// processorsFragmentEntry is one row of the processors status table.
type processorsFragmentEntry struct {
	Name    string // processor name, as returned by Name()
	Status  string // "OK" or the last error message
	LastRun string // humanized time of the last run, or "never"
	NextRun string // humanized time of the next scheduled run
}
// statuszProcessors builds the template data for processorsFragment: one
// entry per registered processor, sorted by name, with humanized run times.
// It takes the processors read lock for the duration of the call.
func (s *service) statuszProcessors(ctx context.Context) interface{} {
	s.processorsMu.RLock()
	defer s.processorsMu.RUnlock()

	res := struct {
		Processors []*processorsFragmentEntry
	}{
		Processors: make([]*processorsFragmentEntry, len(s.processors)),
	}

	i := 0
	for _, processor := range s.processors {
		lastRun := "never"
		if processor.lastRun != nil {
			lastRun = humanize.Time(*processor.lastRun)
		}
		nextRun := "any second now"
		if nr := processor.nextRun(); nr != nil {
			nextRun = humanize.Time(*nr)
		}
		// A processor is shown as OK unless its last run returned an error.
		status := "OK"
		if processor.lastErr != nil {
			status = fmt.Sprintf("%v", processor.lastErr)
		}
		res.Processors[i] = &processorsFragmentEntry{
			Name:    processor.name,
			Status:  status,
			LastRun: lastRun,
			NextRun: nextRun,
		}
		i += 1
	}
	sort.Slice(res.Processors, func(i, j int) bool { return res.Processors[i].Name < res.Processors[j].Name })
	return res
}

View File

@ -128,6 +128,10 @@ local Cluster(fqdn) = {
hostPath: "/var/db/crdb-waw1",
},
},
clients: {
cccampix: k0.cockroach.waw2.Client("cccampix"),
cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
},
},
ceph: {
// waw1 cluster - dead as of 2019/08/06, data corruption

View File

@ -49,6 +49,7 @@ local cm = import "cert-manager.libsonnet";
portHttp: 8080,
hostPath: error "hostPath must be defined",
topology: error "topology must be defined",
clients: [],
namespace: null,
ownNamespace: cluster.cfg.namespace == null,
@ -366,6 +367,20 @@ local cm = import "cert-manager.libsonnet";
},
},
Client(name):: {
certificate: cm.Certificate(cluster.name("client-%s" % name)) {
metadata+: cluster.metadata,
spec: {
secretName: cluster.name("client-%s-certificate" % name),
duration: "43800h0m0s", // 5 years
issuerRef: {
name: cluster.pki.clusterIssuer.metadata.name,
},
commonName: name,
},
},
},
client: kube.Deployment(cluster.name("client")) {
metadata+: cluster.metadata {
labels+: {

View File

@ -5,5 +5,7 @@ RUN set -e -x ;\
apt-get -y update ;\
apt-get -y install \
ca-certificates \
rsync ;\
rsync \
python \
python3 ;\
rm -rf /var/lib/apt/lists

View File

@ -5,7 +5,9 @@ go_library(
srcs = [
"kubernetes.go",
"mirko.go",
"sql.go",
"sql_migrations.go",
"trace.go",
],
importpath = "code.hackerspace.pl/hscloud/go/mirko",
visibility = ["//visibility:public"],

35
go/mirko/sql.go Normal file
View File

@ -0,0 +1,35 @@
package mirko
import (
"context"
"database/sql"
"database/sql/driver"
"time"
"github.com/gchaincl/sqlhooks"
"golang.org/x/net/trace"
)
// sqlHooks implements sqlhooks.Hooks, logging every query and its duration
// to the request trace (if any) carried by the context.
type sqlHooks struct{}

// sqlHooksKey is the context key under which Before stashes the query start
// time for After. An unexported distinct type avoids collisions with string
// keys used by other packages (go vet SA1029); previously a bare "begin"
// string was used.
type sqlHooksKey struct{}

// Before logs the query and its arguments to the context's trace and
// records the query start time in the returned context.
func (h *sqlHooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	tr, ok := trace.FromContext(ctx)
	if ok {
		tr.LazyPrintf("SQL query: %s", query)
		tr.LazyPrintf("SQL args: %+v", args)
	}
	return context.WithValue(ctx, sqlHooksKey{}, time.Now()), nil
}

// After logs the query duration to the context's trace, using the start
// time recorded by Before.
func (h *sqlHooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	begin := ctx.Value(sqlHooksKey{}).(time.Time)
	tr, ok := trace.FromContext(ctx)
	if ok {
		tr.LazyPrintf("SQL took: %s", time.Since(begin).String())
	}
	return ctx, nil
}

// TraceSQL registers, under the name wrapped, a version of the given SQL
// driver that logs all queries and timings to per-request traces.
func TraceSQL(driver driver.Driver, wrapped string) {
	sql.Register(wrapped, sqlhooks.Wrap(driver, &sqlHooks{}))
}

37
go/mirko/trace.go Normal file
View File

@ -0,0 +1,37 @@
package mirko
import (
"context"
"fmt"
"github.com/golang/glog"
"golang.org/x/net/trace"
)
// TraceInfof logs an INFO-level message to the trace embedded in ctx. If
// the context carries no trace, the message falls back to glog with a
// "[no trace]" prefix.
func TraceInfof(ctx context.Context, f string, args ...interface{}) {
	tr, ok := trace.FromContext(ctx)
	if !ok {
		fmtd := fmt.Sprintf(f, args...)
		// Bug fix: glog.Info is Print-style and does not interpret
		// format directives — the original emitted a literal "%v".
		glog.Infof("[no trace] %v", fmtd)
		return
	}
	tr.LazyPrintf(f, args...)
}
// TraceWarningf logs a WARNING-level message via glog and, when ctx
// carries a trace, mirrors it into that trace as well.
func TraceWarningf(ctx context.Context, f string, args ...interface{}) {
	glog.Warningf(f, args...)
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf(f, args...)
	}
}
// TraceErrorf logs an ERROR-level message via glog and, when ctx carries
// a trace, mirrors it into that trace as well.
func TraceErrorf(ctx context.Context, f string, args ...interface{}) {
	glog.Errorf(f, args...)
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf(f, args...)
	}
}

View File

@ -1,4 +1,7 @@
# This file is generated code. DO NOT EDIT.
arrow==0.14.5 \
--hash=sha256:0186026cfd94ca4fb773f30cc5398289a3027480d335e0e5c0d2772643763137 \
--hash=sha256:a12de0124d812d15061ed36c7eb4a421fa1b95026a502a0b2062e9ea00fc4446
asn1crypto==0.24.0 \
--hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \
--hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49
@ -61,6 +64,9 @@ cffi==1.11.5 \
chardet==3.0.4 \
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691
click==7.0 \
--hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \
--hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7
cryptography==2.4.2 \
--hash=sha256:05a6052c6a9f17ff78ba78f8e6eb1d777d25db3b763343a1ae89a7a8670386dd \
--hash=sha256:0eb83a24c650a36f68e31a6d0a70f7ad9c358fa2506dc7b683398b92e354a038 \
@ -87,8 +93,91 @@ django==2.2.3 \
fabric==2.4.0 \
--hash=sha256:93684ceaac92e0b78faae551297e29c48370cede12ff0f853cdebf67d4b87068 \
--hash=sha256:98538f2f3f63cf52497a8d0b24d18424ae83fe67ac7611225c72afb9e67f2cf6
flask==1.1.1 \
--hash=sha256:13f9f196f330c7c2c5d7a5cf91af894110ca0215ac051b5844701f2bfd934d52 \
--hash=sha256:45eb5a6fd193d6cf7e0cf5d8a5b31f83d5faae0293695626f539a823e93b13f6
future==0.17.1 \
--hash=sha256:67045236dcfd6816dc439556d009594abf643e5eb48992e36beac09c2ca659b8
gevent==1.4.0 \
--hash=sha256:0774babec518a24d9a7231d4e689931f31b332c4517a771e532002614e270a64 \
--hash=sha256:0e1e5b73a445fe82d40907322e1e0eec6a6745ca3cea19291c6f9f50117bb7ea \
--hash=sha256:0ff2b70e8e338cf13bedf146b8c29d475e2a544b5d1fe14045aee827c073842c \
--hash=sha256:107f4232db2172f7e8429ed7779c10f2ed16616d75ffbe77e0e0c3fcdeb51a51 \
--hash=sha256:14b4d06d19d39a440e72253f77067d27209c67e7611e352f79fe69e0f618f76e \
--hash=sha256:1b7d3a285978b27b469c0ff5fb5a72bcd69f4306dbbf22d7997d83209a8ba917 \
--hash=sha256:1eb7fa3b9bd9174dfe9c3b59b7a09b768ecd496debfc4976a9530a3e15c990d1 \
--hash=sha256:2711e69788ddb34c059a30186e05c55a6b611cb9e34ac343e69cf3264d42fe1c \
--hash=sha256:28a0c5417b464562ab9842dd1fb0cc1524e60494641d973206ec24d6ec5f6909 \
--hash=sha256:3249011d13d0c63bea72d91cec23a9cf18c25f91d1f115121e5c9113d753fa12 \
--hash=sha256:44089ed06a962a3a70e96353c981d628b2d4a2f2a75ea5d90f916a62d22af2e8 \
--hash=sha256:4bfa291e3c931ff3c99a349d8857605dca029de61d74c6bb82bd46373959c942 \
--hash=sha256:50024a1ee2cf04645535c5ebaeaa0a60c5ef32e262da981f4be0546b26791950 \
--hash=sha256:53b72385857e04e7faca13c613c07cab411480822ac658d97fd8a4ddbaf715c8 \
--hash=sha256:74b7528f901f39c39cdbb50cdf08f1a2351725d9aebaef212a29abfbb06895ee \
--hash=sha256:7d0809e2991c9784eceeadef01c27ee6a33ca09ebba6154317a257353e3af922 \
--hash=sha256:896b2b80931d6b13b5d9feba3d4eebc67d5e6ec54f0cf3339d08487d55d93b0e \
--hash=sha256:8d9ec51cc06580f8c21b41fd3f2b3465197ba5b23c00eb7d422b7ae0380510b0 \
--hash=sha256:9f7a1e96fec45f70ad364e46de32ccacab4d80de238bd3c2edd036867ccd48ad \
--hash=sha256:ab4dc33ef0e26dc627559786a4fba0c2227f125db85d970abbf85b77506b3f51 \
--hash=sha256:d1e6d1f156e999edab069d79d890859806b555ce4e4da5b6418616322f0a3df1 \
--hash=sha256:d752bcf1b98174780e2317ada12013d612f05116456133a6acf3e17d43b71f05 \
--hash=sha256:e5bcc4270671936349249d26140c267397b7b4b1381f5ec8b13c53c5b53ab6e1
greenlet==0.4.15 \
--hash=sha256:000546ad01e6389e98626c1367be58efa613fa82a1be98b0c6fc24b563acc6d0 \
--hash=sha256:0d48200bc50cbf498716712129eef819b1729339e34c3ae71656964dac907c28 \
--hash=sha256:23d12eacffa9d0f290c0fe0c4e81ba6d5f3a5b7ac3c30a5eaf0126bf4deda5c8 \
--hash=sha256:37c9ba82bd82eb6a23c2e5acc03055c0e45697253b2393c9a50cef76a3985304 \
--hash=sha256:51503524dd6f152ab4ad1fbd168fc6c30b5795e8c70be4410a64940b3abb55c0 \
--hash=sha256:8041e2de00e745c0e05a502d6e6db310db7faa7c979b3a5877123548a4c0b214 \
--hash=sha256:81fcd96a275209ef117e9ec91f75c731fa18dcfd9ffaa1c0adbdaa3616a86043 \
--hash=sha256:853da4f9563d982e4121fed8c92eea1a4594a2299037b3034c3c898cb8e933d6 \
--hash=sha256:8b4572c334593d449113f9dc8d19b93b7b271bdbe90ba7509eb178923327b625 \
--hash=sha256:9416443e219356e3c31f1f918a91badf2e37acf297e2fa13d24d1cc2380f8fbc \
--hash=sha256:9854f612e1b59ec66804931df5add3b2d5ef0067748ea29dc60f0efdcda9a638 \
--hash=sha256:99a26afdb82ea83a265137a398f570402aa1f2b5dfb4ac3300c026931817b163 \
--hash=sha256:a19bf883b3384957e4a4a13e6bd1ae3d85ae87f4beb5957e35b0be287f12f4e4 \
--hash=sha256:a9f145660588187ff835c55a7d2ddf6abfc570c2651c276d3d4be8a2766db490 \
--hash=sha256:ac57fcdcfb0b73bb3203b58a14501abb7e5ff9ea5e2edfa06bb03035f0cff248 \
--hash=sha256:bcb530089ff24f6458a81ac3fa699e8c00194208a724b644ecc68422e1111939 \
--hash=sha256:beeabe25c3b704f7d56b573f7d2ff88fc99f0138e43480cecdfcaa3b87fe4f87 \
--hash=sha256:d634a7ea1fc3380ff96f9e44d8d22f38418c1c381d5fac680b272d7d90883720 \
--hash=sha256:d97b0661e1aead761f0ded3b769044bb00ed5d33e1ec865e891a8b128bf7c656
grpcio==1.22.0 \
--hash=sha256:03b78b4e7dcdfe3e257bb528cc93923f9cbbab6d5babf15a60d21e9a4a70b1a2 \
--hash=sha256:1ce0ccfbdfe84387dbcbf44adb4ae16ec7ae70e166ffab478993eb1ea1cba3ce \
--hash=sha256:22e167a9406d73dd19ffe8ed6a485f17e6eac82505be8c108897f15e68badcbb \
--hash=sha256:31d0aeca8d8ee2301c62c5c340e0889d653b1280d68f9fa203982cb6337b050e \
--hash=sha256:44c7f99ca17ebbcc96fc54ed00b454d8313f1eac28c563098d8b901025aff941 \
--hash=sha256:5471444f53f9db6a1f1f11f5dbc173228881df8446380b6b98f90afb8fd8348e \
--hash=sha256:561bca3b1bde6d6564306eb05848fd155136e9c3a25d2961129b1e2edba22fce \
--hash=sha256:5bf58e1d2c2f55365c06e8cb5abe067b88ca2e5550fb62009c41df4b54505acf \
--hash=sha256:6b7163d1e85d76b0815df63fcc310daec02b44532bb433f743142d4febcb181f \
--hash=sha256:766d79cddad95f5f6020037fe60ea8b98578afdf0c59d5a60c106c1bdd886303 \
--hash=sha256:770b7372d5ca68308ff66d7baee53369fa5ce985f84bcb6aa1948c1f2f7b02f2 \
--hash=sha256:7ab178da777fc0f55b6aef5a755f99726e8e4b75e3903954df07b27059b54fcf \
--hash=sha256:8078305e77c2f6649d36b24d8778096413e474d9d7892c6f92cfb589c9d71b2e \
--hash=sha256:85600b63a386d860eeaa955e9335e18dd0d7e5477e9214825abf2c2884488369 \
--hash=sha256:857d9b939ae128be1c0c792eb885c7ff6a386b9dea899ac4b06f4d90a31f9d87 \
--hash=sha256:87a41630c90c179fa5c593400f30a467c498972c702f348d41e19dafeb1d319e \
--hash=sha256:8805d486c6128cc0fcc8ecf16c4095d99a8693a541ef851429ab334e028a4a97 \
--hash=sha256:8d71b7a89c306a41ccc7741fc9409b14f5b86727455c2a1c0c7cfcb0f784e1f2 \
--hash=sha256:9e1b80bd65f8f160880cb4dad7f55697f6d37b2d7f251fc0c2128e811928f369 \
--hash=sha256:9e290c84a145ae2411ee0ec9913c41cd7500e2e7485fe93632434d84ef4fda67 \
--hash=sha256:9ec9f88b5bc94bd99372f27cdd53af1c92ba06717380b127733b953cfb181174 \
--hash=sha256:a0a02a8b4ba6deadf706d5f849539b3685b72b186a3c9ef5d43e8972ed60fb6f \
--hash=sha256:a4059c59519f5940e01a071f74ae2a60ea8f6185b03d22a09d40c7959a36b16b \
--hash=sha256:a6e028c2a6da2ebfa2365a5b32531d311fbfec0e3600fc27e901b64f0ff7e54e \
--hash=sha256:adcdebf9f8463df4120c427cf6c9aed39258bccd03ed37b6939e7a145d64d6e0 \
--hash=sha256:bdec982610259d07156a58f80b8c3e69be7751a9208bc577b059c5193d087fad \
--hash=sha256:cefc4d4251ffb73feb303d4b7e9d6c367cb60f2db16d259ea28b114045f965aa \
--hash=sha256:d4145c8aa6afbac10ad27e408f7ce15992fe89ba5d0b4abca31c0c2729864c03 \
--hash=sha256:da76dc5ad719ee99de5ea28a5629ff92172cbb4a70d8a6ae3a5b7a53c7382ce1 \
--hash=sha256:dde2452c08ef8b6426ccab6b5b6de9f06d836d9937d6870e68153cbf8cb49348 \
--hash=sha256:e3d88091d2539a4868750914a6fe7b9ec50e42b913851fc1b77423b5bd918530 \
--hash=sha256:f9c67cfe6278499d7f83559dc6322a8bbb108e307817a3d7acbfea807b3603cc
gunicorn==19.9.0 \
--hash=sha256:aa8e0b40b4157b36a5df5e599f45c9c76d6af43845ba3b3b0efe2c70473c2471 \
--hash=sha256:fa2662097c66f920f53f70621c6c58ca4a3c4d3434205e608e121b5b3b71f4f3
idna==2.8 \
--hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
--hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c
@ -96,9 +185,63 @@ invoke==1.2.0 \
--hash=sha256:4f4de934b15c2276caa4fbc5a3b8a61c0eb0b234f2be1780d2b793321995c2d6 \
--hash=sha256:dc492f8f17a0746e92081aec3f86ae0b4750bf41607ea2ad87e5a7b5705121b7 \
--hash=sha256:eb6f9262d4d25b40330fb21d1e99bf0f85011ccc3526980f8a3eaedd4b43892e
itsdangerous==1.1.0 \
--hash=sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19 \
--hash=sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749
jinja2==2.10.1 \
--hash=sha256:065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013 \
--hash=sha256:14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b
markupsafe==1.1.1 \
--hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \
--hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \
--hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \
--hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \
--hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \
--hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \
--hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \
--hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \
--hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \
--hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \
--hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \
--hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \
--hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \
--hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \
--hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \
--hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \
--hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \
--hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \
--hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \
--hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \
--hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \
--hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \
--hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \
--hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \
--hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \
--hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \
--hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \
--hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7
paramiko==2.4.2 \
--hash=sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b \
--hash=sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb
protobuf==3.9.0 \
--hash=sha256:05c36022fef3c7d3562ac22402965c0c2b9fe8421f459bb377323598996e407f \
--hash=sha256:139b7eadcca0a861d60b523cb37d9475505e0dfb07972436b15407c2b968d87e \
--hash=sha256:15f683006cb77fb849b1f561e509b03dd2b7dcc749086b8dd1831090d0ba4740 \
--hash=sha256:2ad566b7b7cdd8717c7af1825e19f09e8fef2787b77fcb979588944657679604 \
--hash=sha256:35cfcf97642ef62108e10a9431c77733ec7eaab8e32fe4653de20403429907cb \
--hash=sha256:387822859ecdd012fdc25ec879f7f487da6e1d5b1ae6115e227e6be208836f71 \
--hash=sha256:4df14cbe1e7134afcfdbb9f058949e31c466de27d9b2f7fb4da9e0b67231b538 \
--hash=sha256:586c4ca37a7146d4822c700059f150ac3445ce0aef6f3ea258640838bb892dc2 \
--hash=sha256:58b11e530e954d29ab3180c48dc558a409f705bf16739fd4e0d3e07924ad7add \
--hash=sha256:63c8c98ccb8c95f41c18fb829aeeab21c6249adee4ed75354125bdc44488f30e \
--hash=sha256:72edcbacd0c73eef507d2ff1af99a6c27df18e66a3ff4351e401182e4de62b03 \
--hash=sha256:83dc8a561b3b954fd7002c690bb83278b8d1742a1e28abba9aaef28b0c8b437d \
--hash=sha256:913171ecc84c2726b86574e40549a0ea619d569657c5a5ff782a3be7d81401a5 \
--hash=sha256:aabb7c741d3416671c3e6fe7c52970a226e6a8274417a97d7d795f953fadef36 \
--hash=sha256:b3452bbda12b1cbe2187d416779de07b2ab4c497d83a050e43c344778763721d \
--hash=sha256:c5d5b8d4a9212338297fa1fa44589f69b470c0ba1d38168b432d577176b386a8 \
--hash=sha256:d86ee389c2c4fc3cebabb8ce83a8e97b6b3b5dc727b7419c1ccdc7b6e545a233 \
--hash=sha256:f2db8c754de788ab8be5e108e1e967c774c0942342b4f8aaaf14063889a6cfdc
pyasn1==0.4.5 \
--hash=sha256:da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 \
--hash=sha256:da6b43a8c9ae93bc80e2739efb38cc776ba74a886e3e9318d65fe81a8b8a2c6e
@ -124,6 +267,9 @@ pynacl==1.3.0 \
--hash=sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715 \
--hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \
--hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0
python-dateutil==2.8.0 \
--hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \
--hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e
pytz==2019.1 \
--hash=sha256:303879e36b721603cc54604edcac9d20401bdbe31e1e4fdee5b9f98d5d31dfda \
--hash=sha256:d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141
@ -141,3 +287,12 @@ urllib3==1.25.3 \
--hash=sha256:dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232
uwsgi==2.0.18 \
--hash=sha256:4972ac538800fb2d421027f49b4a1869b66048839507ccf0aa2fda792d99f583
werkzeug==0.15.5 \
--hash=sha256:87ae4e5b5366da2347eb3116c0e6c681a0e939a33b2805e2c0cbd282664932c4 \
--hash=sha256:a13b74dd3c45f758d4ebdb224be8f1ab8ef58b3c0ffc1783a8c7d9f4f50227e6
# The following packages are considered to be unsafe in a requirements file:
setuptools==41.0.1 \
--hash=sha256:a222d126f5471598053c9a77f4b5d4f26eaa1f150ad6e01dcf1a42e185d05613 \
--hash=sha256:c7769ce668c7a333d84e17fe8b524b1c45e7ee9f7908ad0a73e1eda7e6a5aebf \
# via protobuf

View File

@ -1,22 +1,34 @@
arrow==0.14.5
asn1crypto==0.24.0
bcrypt==3.1.5
certifi==2019.6.16
cffi==1.11.5
chardet==3.0.4
Click==7.0
cryptography==2.4.2
Django==2.2.3
fabric==2.4.0
Flask==1.1.1
future==0.17.1
gevent==1.4.0
greenlet==0.4.15
grpcio==1.22.0
gunicorn==19.9.0
idna==2.8
invoke==1.2.0
itsdangerous==1.1.0
Jinja2==2.10.1
MarkupSafe==1.1.1
paramiko==2.4.2
protobuf==3.9.0
pyasn1==0.4.5
pycparser==2.19
PyNaCl==1.3.0
python-dateutil==2.8.0
pytz==2019.1
requests==2.22.0
six==1.12.0
sqlparse==0.3.0
urllib3==1.25.3
uWSGI==2.0.18
Werkzeug==0.15.5