Merge branch 'master' into check-revoke-revised

This commit is contained in: commit 8f16688b0a

@@ -3,7 +3,7 @@ RUN apt-get update
RUN apt-get install -y make software-properties-common curl
RUN curl -sL https://deb.nodesource.com/setup_7.x | bash -
RUN apt-get update
RUN apt-get install -y nodejs libldap2-dev libsasl2-dev libldap2-dev libssl-dev
RUN apt-get install -y npm libldap2-dev libsasl2-dev libldap2-dev libssl-dev
RUN pip install -U setuptools
RUN pip install coveralls bandit
WORKDIR /app

@@ -22,7 +22,7 @@ Lemur
Lemur manages TLS certificate creation. While not able to issue certificates itself, Lemur acts as a broker between CAs
and environments providing a central portal for developers to issue TLS certificates with 'sane' defaults.

It works on CPython 3.5. We deploy on Ubuntu and develop on OS X.
It works on Python 3.7. We deploy on Ubuntu and develop on OS X.


Project resources

@@ -11,12 +11,12 @@
    "angular": "1.4.9",
    "json3": "~3.3",
    "es5-shim": "~4.5.0",
    "bootstrap": "~3.3.6",
    "angular-bootstrap": "~1.1.1",
    "angular-animate": "~1.4.9",
    "restangular": "~1.5.1",
    "ng-table": "~0.8.3",
    "moment": "~2.11.1",
    "bootstrap": "~3.4.1",
    "angular-loading-bar": "~0.8.0",
    "angular-moment": "~0.10.3",
    "moment-range": "~2.1.0",

@@ -24,7 +24,7 @@
    "angularjs-toaster": "~1.0.0",
    "angular-chart.js": "~0.8.8",
    "ngletteravatar": "~4.0.0",
    "bootswatch": "~3.3.6",
    "bootswatch": "~3.4.1",
    "fontawesome": "~4.5.0",
    "satellizer": "~0.13.4",
    "angular-ui-router": "~0.2.15",

@@ -0,0 +1,3 @@
*-env
docker-compose.yml
Dockerfile

@@ -8,12 +8,6 @@ ENV gid 1337
ENV user lemur
ENV group lemur

COPY entrypoint /
COPY src/lemur.conf.py /home/lemur/.lemur/lemur.conf.py
COPY supervisor.conf /
COPY nginx/default.conf /etc/nginx/conf.d/
COPY nginx/default-ssl.conf /etc/nginx/conf.d/

RUN addgroup -S ${group} -g ${gid} && \
    adduser -D -S ${user} -G ${group} -u ${uid} && \
    apk --update add python3 libldap postgresql-client nginx supervisor curl tzdata openssl bash && \

@@ -40,7 +34,6 @@ RUN addgroup -S ${group} -g ${gid} && \
    curl -sSL https://github.com/Netflix/lemur/archive/$VERSION.tar.gz | tar xz -C /opt/lemur --strip-components=1 && \
    pip3 install --upgrade pip && \
    pip3 install --upgrade setuptools && \
    chmod +x /entrypoint && \
    mkdir -p /run/nginx/ /etc/nginx/ssl/ && \
    chown -R $user:$group /opt/lemur/ /home/lemur/.lemur/

@@ -52,6 +45,13 @@ RUN npm install --unsafe-perm && \
    node_modules/.bin/gulp package --urlContextPath=$(urlContextPath) && \
    apk del build-dependencies

COPY entrypoint /
COPY src/lemur.conf.py /home/lemur/.lemur/lemur.conf.py
COPY supervisor.conf /
COPY nginx/default.conf /etc/nginx/conf.d/
COPY nginx/default-ssl.conf /etc/nginx/conf.d/

RUN chmod +x /entrypoint
WORKDIR /

HEALTHCHECK --interval=12s --timeout=12s --start-period=30s \

@@ -0,0 +1,29 @@
version: '3'

services:
  postgres:
    image: "postgres:10"
    restart: always
    volumes:
      - pg_data:/var/lib/postgresql/data
    env_file:
      - pgsql-env

  lemur:
    # image: "netlix-lemur:latest"
    build: .
    depends_on:
      - postgres
      - redis
    env_file:
      - lemur-env
      - pgsql-env
    ports:
      - 80:80
      - 443:443

  redis:
    image: "redis:alpine"

volumes:
  pg_data: {}

@@ -1,4 +1,6 @@
#!/bin/sh
#!/bin/bash

set -eo pipefail

if [ -z "${POSTGRES_USER}" ] || [ -z "${POSTGRES_PASSWORD}" ] || [ -z "${POSTGRES_HOST}" ] || [ -z "${POSTGRES_DB}" ];then
  echo "Database vars not set"

@@ -7,22 +9,23 @@ fi

export POSTGRES_PORT="${POSTGRES_PORT:-5432}"

echo 'export SQLALCHEMY_DATABASE_URI="postgresql://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST:$POSTGRES_PORT/$POSTGRES_DB"' >> /etc/profile
export LEMUR_ADMIN_PASSWORD="${LEMUR_ADMIN_PASSWORD:-admin}"

export SQLALCHEMY_DATABASE_URI="postgresql://$POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST:$POSTGRES_PORT/$POSTGRES_DB"

source /etc/profile

PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_HOST -p $POSTGRES_PORT -U $POSTGRES_USER -d $POSTGRES_DB --command 'select 1;'

echo " # Create Postgres trgm extension"
PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_HOST -p $POSTGRES_PORT -U $POSTGRES_USER -d $POSTGRES_DB --command 'CREATE EXTENSION pg_trgm;'
PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_HOST -p $POSTGRES_PORT -U $POSTGRES_USER -d $POSTGRES_DB --command 'CREATE EXTENSION IF NOT EXISTS pg_trgm;'
echo " # Done"

if [ -z "${SKIP_SSL}" ]; then
  if [ ! -f /etc/nginx/ssl/server.crt ] && [ ! -f /etc/nginx/ssl/server.key ]; then
    openssl req -x509 -newkey rsa:4096 -nodes -keyout /etc/nginx/ssl/server.key -out /etc/nginx/ssl/server.crt -days 365 -subj "/C=US/ST=FAKE/L=FAKE/O=FAKE/OU=FAKE/CN=FAKE"
  fi
  mv /etc/nginx/conf.d/default-ssl.conf.a /etc/nginx/conf.d/default-ssl.conf
  mv /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.a
  [ -f "/etc/nginx/conf.d/default-ssl.conf.a" ] && mv /etc/nginx/conf.d/default-ssl.conf.a /etc/nginx/conf.d/default-ssl.conf
  [ -f "/etc/nginx/conf.d/default.conf" ] && mv -f /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.a
fi

# if [ ! -f /home/lemur/.lemur/lemur.conf.py ]; then

@@ -33,7 +36,7 @@ fi
# fi

echo " # Running init"
su lemur -c "python3 /opt/lemur/lemur/manage.py init"
su lemur -s /bin/bash -c "cd /opt/lemur/lemur; python3 /opt/lemur/lemur/manage.py init -p ${LEMUR_ADMIN_PASSWORD}"
echo " # Done"

# echo "Creating user"

@@ -0,0 +1,25 @@
# SKIP_SSL=1
# LEMUR_TOKEN_SECRET=
# LEMUR_DEFAULT_COUNTRY=
# LEMUR_DEFAULT_STATE=
# LEMUR_DEFAULT_LOCATION=
# LEMUR_DEFAULT_ORGANIZATION=
# LEMUR_DEFAULT_ORGANIZATIONAL_UNIT=
# LEMUR_DEFAULT_ISSUER_PLUGIN=cryptography-issuer
# LEMUR_DEFAULT_AUTHORITY=cryptography
# MAIL_SERVER=mail.example.com
# MAIL_PORT=25
# LEMUR_EMAIL=lemur@example.com
# LEMUR_SECURITY_TEAM_EMAIL=['team@example.com']
# LEMUR_TOKEN_SECRET=
# LEMUR_ENCRYPTION_KEYS=['']
# DEBUG=True
# LDAP_DEBUG=True
# LDAP_AUTH=True
# LDAP_BIND_URI=ldap://example.com
# LDAP_BASE_DN=DC=example,DC=com
# LDAP_EMAIL_DOMAIN=example.com
# LDAP_USE_TLS=False
# LDAP_REQUIRED_GROUP=certificate-management-admins
# LDAP_GROUPS_TO_ROLES={'certificate-management-admins': 'admin', 'Team': 'team@example.com'}
# LDAP_IS_ACTIVE_DIRECTORY=False

@@ -9,7 +9,7 @@ server {
}

server {
    listen 443;
    listen 443 ssl;
    server_name _;
    access_log /dev/stdout;
    error_log /dev/stderr;

@@ -0,0 +1,4 @@
POSTGRES_USER=lemur
POSTGRES_PASSWORD=12345
POSTGRES_DB=lemur
POSTGRES_HOST=postgres

@@ -1,4 +1,6 @@
import os
from ast import literal_eval

_basedir = os.path.abspath(os.path.dirname(__file__))

CORS = os.environ.get("CORS") == "True"

@@ -29,3 +31,13 @@ LOG_LEVEL = str(os.environ.get('LOG_LEVEL','DEBUG'))
LOG_FILE = str(os.environ.get('LOG_FILE','/home/lemur/.lemur/lemur.log'))

SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI','postgresql://lemur:lemur@localhost:5432/lemur')

LDAP_DEBUG = os.environ.get('LDAP_DEBUG') == "True"
LDAP_AUTH = os.environ.get('LDAP_AUTH') == "True"
LDAP_IS_ACTIVE_DIRECTORY = os.environ.get('LDAP_IS_ACTIVE_DIRECTORY') == "True"
LDAP_BIND_URI = str(os.environ.get('LDAP_BIND_URI',''))
LDAP_BASE_DN = str(os.environ.get('LDAP_BASE_DN',''))
LDAP_EMAIL_DOMAIN = str(os.environ.get('LDAP_EMAIL_DOMAIN',''))
LDAP_USE_TLS = str(os.environ.get('LDAP_USE_TLS',''))
LDAP_REQUIRED_GROUP = str(os.environ.get('LDAP_REQUIRED_GROUP',''))
LDAP_GROUPS_TO_ROLES = literal_eval(os.environ.get('LDAP_GROUPS_TO_ROLES') or "{}")

@@ -735,6 +735,12 @@ The following configuration properties are required to use the Digicert issuer p
    This is the default validity (in years), if no end date is specified. (Default: 1)


.. data:: DIGICERT_MAX_VALIDITY
    :noindex:

    This is the maximum validity (in years). (Default: value of DIGICERT_DEFAULT_VALIDITY)


.. data:: DIGICERT_PRIVATE
    :noindex:


@@ -973,6 +979,53 @@ Will be the sender of all notifications, so ensure that it is verified with AWS.
SES if the default notification gateway and will be used unless SMTP settings are configured in the application configuration
settings.

PowerDNS ACME Plugin
~~~~~~~~~~~~~~~~~~~~~~

The following configuration properties are required to use the PowerDNS ACME Plugin for domain validation.


.. data:: ACME_POWERDNS_DOMAIN
    :noindex:

    This is the FQDN for the PowerDNS API (without path)


.. data:: ACME_POWERDNS_SERVERID
    :noindex:

    This is the ServerID attribute of the PowerDNS API Server (i.e. "localhost")


.. data:: ACME_POWERDNS_APIKEYNAME
    :noindex:

    This is the Key name to use for authentication (i.e. "X-API-Key")


.. data:: ACME_POWERDNS_APIKEY
    :noindex:

    This is the API Key to use for authentication (i.e. "Password")


.. data:: ACME_POWERDNS_RETRIES
    :noindex:

    This is the number of times DNS Verification should be attempted (i.e. 20)


.. data:: ACME_POWERDNS_VERIFY
    :noindex:

    This configures how TLS certificates on the PowerDNS API target are validated. The PowerDNS Plugin depends on the PyPi requests library, which supports the following options for the verify parameter:

    True: Verifies the TLS certificate was issued by a known publicly-trusted CA. (Default)

    False: Disables certificate validation (Not Recommended)

    File/Dir path to CA Bundle: Verifies the TLS certificate was issued by a Certificate Authority in the provided CA bundle.
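
As a hedged illustration (the values here are placeholders, not part of this commit), the PowerDNS settings above might be set in ``lemur.conf.py`` like so::

    ACME_POWERDNS_DOMAIN = "powerdns.example.com"
    ACME_POWERDNS_SERVERID = "localhost"
    ACME_POWERDNS_APIKEYNAME = "X-API-Key"
    ACME_POWERDNS_APIKEY = "Password"
    ACME_POWERDNS_RETRIES = 20
    ACME_POWERDNS_VERIFY = True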

.. _CommandLineInterface:

Command Line Interface

@@ -1071,6 +1124,15 @@ All commands default to `~/.lemur/lemur.conf.py` if a configuration is not speci
        lemur notify


.. data:: acme

    Handles all ACME related tasks, like ACME plugin testing.

    ::

        lemur acme


Sub-commands
------------

@@ -1172,11 +1234,12 @@ Acme
    Kevin Glisson <kglisson@netflix.com>,
    Curtis Castrapel <ccastrapel@netflix.com>,
    Hossein Shafagh <hshafagh@netflix.com>,
    Mikhail Khodorovskiy <mikhail.khodorovskiy@jivesoftware.com>
    Mikhail Khodorovskiy <mikhail.khodorovskiy@jivesoftware.com>,
    Chad Sine <csine@netflix.com>
:Type:
    Issuer
:Description:
    Adds support for the ACME protocol (including LetsEncrypt) with domain validation being handled Route53.
    Adds support for the ACME protocol (including LetsEncrypt) with domain validation using several providers.


Atlas

@@ -390,6 +390,10 @@ Here are the Celery configuration variables that should be set::

    CELERY_IMPORTS = ('lemur.common.celery')
    CELERY_TIMEZONE = 'UTC'

Do not forget to import crontab module in your configuration file::

    from celery.task.schedules import crontab
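
For illustration only (the schedule values are placeholders, not part of this commit), a crontab-based beat entry for one of the tasks in ``lemur.common.celery`` could look like::

    CELERYBEAT_SCHEDULE = {
        'report_celery_last_success_metrics': {
            'task': 'lemur.common.celery.report_celery_last_success_metrics',
            'options': {'expires': 180},
            'schedule': crontab(minute="*"),
        },
    }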

You must start a single Celery scheduler instance and one or more worker instances in order to handle incoming tasks.
The scheduler can be started with::

@@ -31,7 +31,7 @@ If installing Lemur on a bare Ubuntu OS you will need to grab the following pack

.. note:: PostgreSQL is only required if your database is going to be on the same host as the webserver. npm is needed if you're installing Lemur from the source (e.g., from git).

.. note:: Installing node from a package manager may creat the nodejs bin at /usr/bin/nodejs instead of /usr/bin/node If that is the case run the following
.. note:: Installing node from a package manager may create the nodejs bin at /usr/bin/nodejs instead of /usr/bin/node If that is the case run the following
    sudo ln -s /user/bin/nodejs /usr/bin/node

Now, install Python ``virtualenv`` package:

@@ -180,6 +180,13 @@ Lemur provides a helpful command that will initialize your database for you. It

In addition to creating a new user, Lemur also creates a few default email notifications. These notifications are based on a few configuration options such as ``LEMUR_SECURITY_TEAM_EMAIL``. They basically guarantee that every certificate within Lemur will send one expiration notification to the security team.

Your database installation requires the pg_trgm extension. If you do not have this installed already, you can allow the script to install this for you by adding the SUPERUSER permission to the lemur database user.

.. code-block:: bash

    sudo -u postgres -i
    psql
    postgres=# ALTER USER lemur WITH SUPERUSER

Additional notifications can be created through the UI or API. See :ref:`Creating Notifications <CreatingNotifications>` and :ref:`Command Line Interface <CommandLineInterface>` for details.

**Make note of the password used as this will be used during first login to the Lemur UI.**

@@ -189,10 +196,16 @@ Additional notifications can be created through the UI or API. See :ref:`Creati
    cd /www/lemur/lemur
    lemur init

.. note:: If you added the SUPERUSER permission to the lemur database user above, it is recommended you revoke that permission now.

.. code-block:: bash

    sudo -u postgres -i
    psql
    postgres=# ALTER USER lemur WITH NOSUPERUSER


.. note:: It is recommended that once the ``lemur`` user is created that you create individual users for every day access. There is currently no way for a user to self enroll for Lemur access, they must have an administrator create an account for them or be enrolled automatically through SSO. This can be done through the CLI or UI. See :ref:`Creating Users <CreatingUsers>` and :ref:`Command Line Interface <CommandLineInterface>` for details.


Set Up a Reverse Proxy
----------------------

@@ -0,0 +1,86 @@
import time
import json

from flask_script import Manager
from flask import current_app

from lemur.extensions import sentry
from lemur.constants import SUCCESS_METRIC_STATUS
from lemur.plugins.lemur_acme.plugin import AcmeHandler

manager = Manager(
    usage="Handles all ACME related tasks"
)


@manager.option(
    "-d",
    "--domain",
    dest="domain",
    required=True,
    help="Name of the Domain to store to (ex. \"_acme-chall.test.com\".",
)
@manager.option(
    "-t",
    "--token",
    dest="token",
    required=True,
    help="Value of the Token to store in DNS as content.",
)
def dnstest(domain, token):
    """
    Create, verify, and delete DNS TXT records using an autodetected provider.
    """
    print("[+] Starting ACME Tests.")
    change_id = (domain, token)

    acme_handler = AcmeHandler()
    acme_handler.autodetect_dns_providers(domain)
    if not acme_handler.dns_providers_for_domain[domain]:
        raise Exception(f"No DNS providers found for domain: {format(domain)}.")

    # Create TXT Records
    for dns_provider in acme_handler.dns_providers_for_domain[domain]:
        dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
        dns_provider_options = json.loads(dns_provider.credentials)
        account_number = dns_provider_options.get("account_id")

        print(f"[+] Creating TXT Record in `{dns_provider.name}` provider")
        change_id = dns_provider_plugin.create_txt_record(domain, token, account_number)

    print("[+] Verifying TXT Record has propagated to DNS.")
    print("[+] This step could take a while...")
    time.sleep(10)

    # Verify TXT Records
    for dns_provider in acme_handler.dns_providers_for_domain[domain]:
        dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
        dns_provider_options = json.loads(dns_provider.credentials)
        account_number = dns_provider_options.get("account_id")

        try:
            dns_provider_plugin.wait_for_dns_change(change_id, account_number)
            print(f"[+] Verified TXT Record in `{dns_provider.name}` provider")
        except Exception:
            sentry.captureException()
            current_app.logger.debug(
                f"Unable to resolve DNS challenge for change_id: {change_id}, account_id: "
                f"{account_number}",
                exc_info=True,
            )
            print(f"[+] Unable to Verify TXT Record in `{dns_provider.name}` provider")

    time.sleep(10)

    # Delete TXT Records
    for dns_provider in acme_handler.dns_providers_for_domain[domain]:
        dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
        dns_provider_options = json.loads(dns_provider.credentials)
        account_number = dns_provider_options.get("account_id")

        # TODO(csine@: Add Exception Handling
        dns_provider_plugin.delete_txt_record(change_id, account_number, domain, token)
        print(f"[+] Deleted TXT Record in `{dns_provider.name}` provider")

    status = SUCCESS_METRIC_STATUS
    print("[+] Done with ACME Tests.")

@@ -105,7 +105,7 @@ class LdapPrincipal:
        role = role_service.get_by_name(self.ldap_default_role)
        if role:
            if not role.third_party:
                role = role.set_third_party(role.id, third_party_status=True)
                role = role_service.set_third_party(role.id, third_party_status=True)
            roles.add(role)

        # update their 'roles'

@@ -127,6 +127,10 @@ def retrieve_user(user_api_url, access_token):

    # retrieve information about the current user.
    r = requests.get(user_api_url, params=user_params, headers=headers)
    # Some IDPs, like "Keycloak", require a POST instead of a GET
    if r.status_code == 400:
        r = requests.post(user_api_url, data=user_params, headers=headers)

    profile = r.json()

    user = user_service.get_by_email(profile["email"])

@@ -434,7 +438,7 @@ class OAuth2(Resource):
            verify_cert=verify_cert,
        )

        jwks_url = current_app.config.get("PING_JWKS_URL")
        jwks_url = current_app.config.get("OAUTH2_JWKS_URL")
        error_code = validate_id_token(id_token, args["clientId"], jwks_url)
        if error_code:
            return error_code

@@ -5,29 +5,19 @@
    :license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import sys
import multiprocessing
from tabulate import tabulate
from sqlalchemy import or_

import sys
from flask import current_app

from flask_script import Manager
from flask_principal import Identity, identity_changed

from flask_script import Manager
from sqlalchemy import or_
from tabulate import tabulate

from lemur import database
from lemur.extensions import sentry
from lemur.extensions import metrics
from lemur.plugins.base import plugins
from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS
from lemur.deployment import service as deployment_service
from lemur.endpoints import service as endpoint_service
from lemur.notifications.messaging import send_rotation_notification
from lemur.domains.models import Domain
from lemur.authorities.models import Authority
from lemur.certificates.schemas import CertificateOutputSchema
from lemur.authorities.service import get as authorities_get_by_id
from lemur.certificates.models import Certificate
from lemur.certificates.schemas import CertificateOutputSchema
from lemur.certificates.service import (
    reissue_certificate,
    get_certificate_primitives,

@@ -35,9 +25,16 @@ from lemur.certificates.service import (
    get_by_name,
    get_all_valid_certs,
    get,
    get_all_certs_attached_to_endpoint_without_autorotate,
)

from lemur.certificates.verify import verify_string
from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS
from lemur.deployment import service as deployment_service
from lemur.domains.models import Domain
from lemur.endpoints import service as endpoint_service
from lemur.extensions import sentry, metrics
from lemur.notifications.messaging import send_rotation_notification
from lemur.plugins.base import plugins

manager = Manager(usage="Handles all certificate related tasks.")

@@ -503,3 +500,45 @@ def check_revoked():
            cert.status = "unknown"

        database.update(cert)


@manager.command
def automatically_enable_autorotate():
    """
    This function automatically enables auto-rotation for unexpired certificates that are
    attached to an endpoint but do not have autorotate enabled.

    WARNING: This will overwrite the Auto-rotate toggle!
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Enabling auto-rotate for certificate"
    }

    permitted_authorities = current_app.config.get("ENABLE_AUTO_ROTATE_AUTHORITY", [])

    eligible_certs = get_all_certs_attached_to_endpoint_without_autorotate()
    for cert in eligible_certs:

        if cert.authority_id not in permitted_authorities:
            continue

        log_data["certificate"] = cert.name
        log_data["certificate_id"] = cert.id
        log_data["authority_id"] = cert.authority_id
        log_data["authority_name"] = authorities_get_by_id(cert.authority_id).name
        if cert.destinations:
            log_data["destination_names"] = ', '.join([d.label for d in cert.destinations])
        else:
            log_data["destination_names"] = "NONE"
        current_app.logger.info(log_data)
        metrics.send("automatically_enable_autorotate",
                     "counter", 1,
                     metric_tags={"certificate": log_data["certificate"],
                                  "certificate_id": log_data["certificate_id"],
                                  "authority_id": log_data["authority_id"],
                                  "authority_name": log_data["authority_name"],
                                  "destination_names": log_data["destination_names"]
                                  })
        cert.rotation = True
        database.update(cert)
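
# Illustrative sketch, not part of the commit: the authority allow-list read
# above comes from the Lemur configuration file; a deployment might opt
# authorities in with something like (the IDs are hypothetical):
#
#     ENABLE_AUTO_ROTATE_AUTHORITY = [3, 7]
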
@@ -321,7 +321,8 @@ class Certificate(db.Model):

    @hybrid_property
    def expired(self):
        if self.not_after <= arrow.utcnow():
        # can't compare offset-naive and offset-aware datetimes
        if arrow.Arrow.fromdatetime(self.not_after) <= arrow.utcnow():
            return True

    @expired.expression

@@ -445,6 +446,9 @@ def update_destinations(target, value, initiator):
    """
    destination_plugin = plugins.get(value.plugin_name)
    status = FAILURE_METRIC_STATUS

    if target.expired:
        return
    try:
        if target.private_key or not destination_plugin.requires_key:
            destination_plugin.upload(

@@ -6,6 +6,8 @@
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask import current_app
from flask_restful import inputs
from flask_restful.reqparse import RequestParser
from marshmallow import fields, validate, validates_schema, post_load, pre_load
from marshmallow.exceptions import ValidationError

@@ -117,6 +119,9 @@ class CertificateInputSchema(CertificateCreationSchema):

    @validates_schema
    def validate_authority(self, data):
        if 'authority' not in data:
            raise ValidationError("Missing Authority.")

        if isinstance(data["authority"], str):
            raise ValidationError("Authority not found.")

@@ -285,6 +290,16 @@ class CertificateOutputSchema(LemurOutputSchema):
    rotation_policy = fields.Nested(RotationPolicyNestedOutputSchema)


class CertificateShortOutputSchema(LemurOutputSchema):
    id = fields.Integer()
    name = fields.String()
    owner = fields.Email()
    notify = fields.Boolean()
    authority = fields.Nested(AuthorityNestedOutputSchema)
    issuer = fields.String()
    cn = fields.String()


class CertificateUploadInputSchema(CertificateCreationSchema):
    name = fields.String()
    authority = fields.Nested(AssociatedAuthoritySchema, required=False)

@@ -363,9 +378,22 @@ class CertificateRevokeSchema(LemurInputSchema):
    comments = fields.String()


certificates_list_request_parser = RequestParser()
certificates_list_request_parser.add_argument("short", type=inputs.boolean, default=False, location="args")


def certificates_list_output_schema_factory():
    args = certificates_list_request_parser.parse_args()
    if args["short"]:
        return certificates_short_output_schema
    else:
        return certificates_output_schema


certificate_input_schema = CertificateInputSchema()
certificate_output_schema = CertificateOutputSchema()
certificates_output_schema = CertificateOutputSchema(many=True)
certificates_short_output_schema = CertificateShortOutputSchema(many=True)
certificate_upload_input_schema = CertificateUploadInputSchema()
certificate_export_input_schema = CertificateExportInputSchema()
certificate_edit_input_schema = CertificateEditInputSchema()
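
# Illustrative sketch, not part of the commit: with the request parser above,
# API clients opt into the trimmed serializer per request. Assuming a local
# Lemur instance and a valid token (both hypothetical here):
#
#     import requests
#     requests.get(
#         "https://lemur.example.com/api/1/certificates",
#         params={"short": "true"},
#         headers={"Authorization": "Bearer <token>"},
#     )
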
@@ -123,12 +123,13 @@ def get_all_valid_certs(authority_plugin_name):
    )


def get_all_pending_cleaning(source):
def get_all_pending_cleaning_expired(source):
    """
    Retrieves all certificates that are available for cleaning.
    Retrieves all certificates that are available for cleaning. These are certificates which are expired and are not
    attached to any endpoints.

    :param source:
    :return:
    :param source: the source to search for certificates
    :return: list of pending certificates
    """
    return (
        Certificate.query.filter(Certificate.sources.any(id=source.id))

@@ -138,6 +139,58 @@ def get_all_pending_cleaning(source):
    )


def get_all_certs_attached_to_endpoint_without_autorotate():
    """
    Retrieves all certificates that are attached to an endpoint, but that do not have autorotate enabled.

    :return: list of certificates attached to an endpoint without autorotate
    """
    return (
        Certificate.query.filter(Certificate.endpoints.any())
        .filter(Certificate.rotation == False)
        .filter(Certificate.not_after >= arrow.now())
        .filter(not_(Certificate.replaced.any()))
        .all() # noqa
    )


def get_all_pending_cleaning_expiring_in_days(source, days_to_expire):
    """
    Retrieves all certificates that are available for cleaning, not attached to endpoint,
    and within X days from expiration.

    :param days_to_expire: defines how many days till the certificate is expired
    :param source: the source to search for certificates
    :return: list of pending certificates
    """
    expiration_window = arrow.now().shift(days=+days_to_expire).format("YYYY-MM-DD")
    return (
        Certificate.query.filter(Certificate.sources.any(id=source.id))
        .filter(not_(Certificate.endpoints.any()))
        .filter(Certificate.not_after < expiration_window)
        .all()
    )


def get_all_pending_cleaning_issued_since_days(source, days_since_issuance):
    """
    Retrieves all certificates that are available for cleaning: not attached to endpoint, and X days since issuance.

    :param days_since_issuance: defines how many days since the certificate is issued
    :param source: the source to search for certificates
    :return: list of pending certificates
    """
    not_in_use_window = (
        arrow.now().shift(days=-days_since_issuance).format("YYYY-MM-DD")
    )
    return (
        Certificate.query.filter(Certificate.sources.any(id=source.id))
        .filter(not_(Certificate.endpoints.any()))
        .filter(Certificate.date_created > not_in_use_window)
        .all()
    )


def get_all_pending_reissue():
    """
    Retrieves all certificates that need to be rotated.

@@ -352,9 +405,11 @@ def render(args):

    show_expired = args.pop("showExpired")
    if show_expired != 1:
        one_month_old = arrow.now()\
            .shift(months=current_app.config.get("HIDE_EXPIRED_CERTS_AFTER_MONTHS", -1))\
        one_month_old = (
            arrow.now()
            .shift(months=current_app.config.get("HIDE_EXPIRED_CERTS_AFTER_MONTHS", -1))
            .format("YYYY-MM-DD")
        )
        query = query.filter(Certificate.not_after > one_month_old)

    time_range = args.pop("time_range")

@@ -414,6 +469,9 @@ def render(args):
                    Certificate.cn.ilike(term),
                )
            )
        elif "fixedName" in terms:
            # only what matches the fixed name directly if a fixedname is provided
            query = query.filter(Certificate.name == terms[1])
        else:
            query = database.filter(query, Certificate, terms)

@@ -440,7 +498,7 @@ def render(args):
        )

    if time_range:
        to = arrow.now().replace(weeks=+time_range).format("YYYY-MM-DD")
        to = arrow.now().shift(weeks=+time_range).format("YYYY-MM-DD")
        now = arrow.now().format("YYYY-MM-DD")
        query = query.filter(Certificate.not_after <= to).filter(
            Certificate.not_after >= now

@@ -582,7 +640,7 @@ def stats(**kwargs):
    """
    if kwargs.get("metric") == "not_after":
        start = arrow.utcnow()
        end = start.replace(weeks=+32)
        end = start.shift(weeks=+32)
        items = (
            database.db.session.query(Certificate.issuer, func.count(Certificate.id))
            .group_by(Certificate.issuer)

@@ -27,6 +27,7 @@ from lemur.certificates.schemas import (
    certificates_output_schema,
    certificate_export_input_schema,
    certificate_edit_input_schema,
    certificates_list_output_schema_factory,
)

from lemur.roles import service as role_service

@@ -250,7 +251,7 @@ class CertificatesList(AuthenticatedResource):
        self.reqparse = reqparse.RequestParser()
        super(CertificatesList, self).__init__()

    @validate_schema(None, certificates_output_schema)
    @validate_schema(None, certificates_list_output_schema_factory)
    def get(self):
        """
        .. http:get:: /certificates

@@ -10,27 +10,27 @@ command: celery -A lemur.common.celery worker --loglevel=info -l DEBUG -B
import copy
import sys
import time
from datetime import datetime, timezone, timedelta

from celery import Celery
from celery.app.task import Context
from celery.exceptions import SoftTimeLimitExceeded
from celery.signals import task_failure, task_received, task_revoked, task_success
from datetime import datetime, timezone, timedelta
from flask import current_app

from lemur.authorities.service import get as get_authority
from lemur.certificates import cli as cli_certificate
from lemur.common.redis import RedisHandler
from lemur.destinations import service as destinations_service
from lemur.dns_providers import cli as cli_dns_providers
from lemur.endpoints import cli as cli_endpoints
from lemur.extensions import metrics, sentry
from lemur.factory import create_app
from lemur.notifications import cli as cli_notification
from lemur.notifications.messaging import send_pending_failure_notification
from lemur.pending_certificates import service as pending_certificate_service
from lemur.plugins.base import plugins
from lemur.sources.cli import clean, sync, validate_sources
from lemur.sources.service import add_aws_destination_to_sources
from lemur.certificates import cli as cli_certificate
from lemur.dns_providers import cli as cli_dns_providers
from lemur.notifications import cli as cli_notification
from lemur.endpoints import cli as cli_endpoints


if current_app:
    flask_app = current_app

@@ -67,7 +67,7 @@ def is_task_active(fun, task_id, args):
    from celery.task.control import inspect

    if not args:
        args = '()' # empty args
        args = "()" # empty args

    i = inspect()
    active_tasks = i.active()

@@ -80,6 +80,37 @@ def is_task_active(fun, task_id, args):
    return False


def get_celery_request_tags(**kwargs):
    request = kwargs.get("request")
    sender_hostname = "unknown"
    sender = kwargs.get("sender")
    if sender:
        try:
            sender_hostname = sender.hostname
        except AttributeError:
            sender_hostname = vars(sender.request).get("origin", "unknown")
    if request and not isinstance(
        request, Context
    ): # unlike others, task_revoked sends a Context for `request`
        task_name = request.name
        task_id = request.id
        receiver_hostname = request.hostname
    else:
        task_name = sender.name
        task_id = sender.request.id
        receiver_hostname = sender.request.hostname

    tags = {
        "task_name": task_name,
        "task_id": task_id,
        "sender_hostname": sender_hostname,
        "receiver_hostname": receiver_hostname,
    }
    if kwargs.get("exception"):
        tags["error"] = repr(kwargs["exception"])
    return tags


@celery.task()
def report_celery_last_success_metrics():
    """
@@ -89,7 +120,6 @@ def report_celery_last_success_metrics():
    report_celery_last_success_metrics should be ran periodically to emit metrics on when a task was last successful.
    Admins can then alert when tasks are not ran when intended. Admins should also alert when no metrics are emitted
    from this function.

    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = None

@@ -108,15 +138,91 @@ def report_celery_last_success_metrics():
        return

    current_time = int(time.time())
    schedule = current_app.config.get('CELERYBEAT_SCHEDULE')
    schedule = current_app.config.get("CELERYBEAT_SCHEDULE")
    for _, t in schedule.items():
        task = t.get("task")
        last_success = int(red.get(f"{task}.last_success") or 0)
        metrics.send(f"{task}.time_since_last_success", 'gauge', current_time - last_success)
        metrics.send(
            f"{task}.time_since_last_success", "gauge", current_time - last_success
        )
    red.set(
        f"{function}.last_success", int(time.time())
    ) # Alert if this metric is not seen
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)


@task_received.connect
def report_number_pending_tasks(**kwargs):
    """
    Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
    for autoscaling workers.
    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
    """
    with flask_app.app_context():
        metrics.send(
            "celery.new_pending_task",
            "TIMER",
            1,
            metric_tags=get_celery_request_tags(**kwargs),
        )


@task_success.connect
def report_successful_task(**kwargs):
    """
    Report a generic success metric as tasks to our metrics broker every time a task finished correctly.
    This metric can be used for autoscaling workers.
    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
    """
    with flask_app.app_context():
        tags = get_celery_request_tags(**kwargs)
        red.set(f"{tags['task_name']}.last_success", int(time.time()))
        metrics.send("celery.successful_task", "TIMER", 1, metric_tags=tags)


@task_failure.connect
def report_failed_task(**kwargs):
    """
    Report a generic failure metric as tasks to our metrics broker every time a task fails.
    This metric can be used for alerting.
    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
    """
    with flask_app.app_context():
        log_data = {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "Message": "Celery Task Failure",
        }

        # Add traceback if exception info is in the kwargs
        einfo = kwargs.get("einfo")
        if einfo:
            log_data["traceback"] = einfo.traceback

        error_tags = get_celery_request_tags(**kwargs)

        log_data.update(error_tags)
        current_app.logger.error(log_data)
        metrics.send("celery.failed_task", "TIMER", 1, metric_tags=error_tags)


@task_revoked.connect
def report_revoked_task(**kwargs):
    """
    Report a generic failure metric as tasks to our metrics broker every time a task is revoked.
    This metric can be used for alerting.
    https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
    """
    with flask_app.app_context():
        log_data = {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "Message": "Celery Task Revoked",
        }

        error_tags = get_celery_request_tags(**kwargs)

        log_data.update(error_tags)
        current_app.logger.error(log_data)
        metrics.send("celery.revoked_task", "TIMER", 1, metric_tags=error_tags)


@celery.task(soft_time_limit=600)
@@ -217,15 +323,15 @@ def fetch_acme_cert(id):
    log_data["failed"] = failed
    log_data["wrong_issuer"] = wrong_issuer
    current_app.logger.debug(log_data)
    metrics.send(f"{function}.resolved", 'gauge', new)
    metrics.send(f"{function}.failed", 'gauge', failed)
    metrics.send(f"{function}.wrong_issuer", 'gauge', wrong_issuer)
    metrics.send(f"{function}.resolved", "gauge", new)
    metrics.send(f"{function}.failed", "gauge", failed)
    metrics.send(f"{function}.wrong_issuer", "gauge", wrong_issuer)
    print(
        "[+] Certificates: New: {new} Failed: {failed} Not using ACME: {wrong_issuer}".format(
            new=new, failed=failed, wrong_issuer=wrong_issuer
        )
    )
    red.set(f'{function}.last_success', int(time.time()))
    return log_data


@celery.task()

@@ -262,8 +368,8 @@ def fetch_all_pending_acme_certs():
        current_app.logger.debug(log_data)
        fetch_acme_cert.delay(cert.id)

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task()

@@ -296,8 +402,8 @@ def remove_old_acme_certs():
        current_app.logger.debug(log_data)
        pending_certificate_service.delete(cert)

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task()

@@ -328,11 +434,11 @@ def clean_all_sources():
        current_app.logger.debug(log_data)
        clean_source.delay(source.label)

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=600)
@celery.task(soft_time_limit=3600)
def clean_source(source):
    """
    This celery task will clean the specified source. This is a destructive operation that will delete unused

@@ -366,6 +472,7 @@ def clean_source(source):
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return log_data


@celery.task()

@@ -395,8 +502,8 @@ def sync_all_sources():
        current_app.logger.debug(log_data)
        sync_source.delay(source.label)

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=7200)

@@ -428,19 +535,23 @@ def sync_source(source):
    current_app.logger.debug(log_data)
    try:
        sync([source])
        metrics.send(f"{function}.success", 'counter', 1, metric_tags={"source": source})
        metrics.send(
            f"{function}.success", "counter", 1, metric_tags={"source": source}
        )
    except SoftTimeLimitExceeded:
        log_data["message"] = "Error syncing source: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("sync_source_timeout", "counter", 1, metric_tags={"source": source})
        metrics.send(
            "sync_source_timeout", "counter", 1, metric_tags={"source": source}
        )
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return

    log_data["message"] = "Done syncing source"
    current_app.logger.debug(log_data)
    metrics.send(f"{function}.success", 'counter', 1, metric_tags={"source": source})
    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", "counter", 1, metric_tags={"source": source})
    return log_data


@celery.task()
@@ -477,8 +588,8 @@ def sync_source_destination():

    log_data["message"] = "completed Syncing AWS destinations and sources"
    current_app.logger.debug(log_data)
    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)

@@ -515,8 +626,8 @@ def certificate_reissue():

    log_data["message"] = "reissuance completed"
    current_app.logger.debug(log_data)
    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)

@@ -534,7 +645,6 @@ def certificate_rotate():
        "function": function,
        "message": "rotating certificates",
        "task_id": task_id,

    }

    if task_id and is_task_active(function, task_id, None):

@@ -554,8 +664,8 @@ def certificate_rotate():

    log_data["message"] = "rotation completed"
    current_app.logger.debug(log_data)
    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)

@@ -590,8 +700,8 @@ def endpoints_expire():
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=600)

@@ -626,8 +736,8 @@ def get_all_zones():
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)

@@ -662,8 +772,8 @@ def check_revoked():
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)

@@ -690,7 +800,9 @@ def notify_expirations():

    current_app.logger.debug(log_data)
    try:
        cli_notification.expirations(current_app.config.get("EXCLUDE_CN_FROM_NOTIFICATION", []))
        cli_notification.expirations(
            current_app.config.get("EXCLUDE_CN_FROM_NOTIFICATION", [])
        )
    except SoftTimeLimitExceeded:
        log_data["message"] = "Notify expiring Time limit exceeded."
        current_app.logger.error(log_data)
@@ -698,5 +810,29 @@ def notify_expirations():
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": function})
        return

    red.set(f'{function}.last_success', int(time.time()))
    metrics.send(f"{function}.success", 'counter', 1)
    metrics.send(f"{function}.success", "counter", 1)
    return log_data


@celery.task(soft_time_limit=3600)
def enable_autorotate_for_certs_attached_to_endpoint():
    """
    This celery task automatically enables autorotation for unexpired certificates that are
    attached to an endpoint but do not have autorotate enabled.
    :return:
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = None
    if celery.current_task:
        task_id = celery.current_task.request.id

    log_data = {
        "function": function,
        "task_id": task_id,
        "message": "Enabling autorotate to eligible certificates",
    }
    current_app.logger.debug(log_data)

    cli_certificate.automatically_enable_autorotate()
    metrics.send(f"{function}.success", "counter", 1)
    return log_data

@@ -2,6 +2,7 @@ import re
import unicodedata

from cryptography import x509
from cryptography.hazmat.primitives.serialization import Encoding
from flask import current_app

from lemur.common.utils import is_selfsigned

@@ -71,12 +72,20 @@ def common_name(cert):
    :return: Common name or None
    """
    try:
        return cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[
            0
        ].value.strip()
        subject_oid = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)
        if len(subject_oid) > 0:
            return subject_oid[0].value.strip()
        return None
    except Exception as e:
        sentry.captureException()
        current_app.logger.error("Unable to get common name! {0}".format(e))
        current_app.logger.error(
            {
                "message": "Unable to get common name",
                "error": e,
                "public_key": cert.public_bytes(Encoding.PEM).decode("utf-8")
            },
            exc_info=True
        )


def organization(cert):

@@ -15,11 +15,11 @@ def convert_validity_years(data):
    now = arrow.utcnow()
    data["validity_start"] = now.isoformat()

    end = now.replace(years=+int(data["validity_years"]))
    end = now.shift(years=+int(data["validity_years"]))

    if not current_app.config.get("LEMUR_ALLOW_WEEKEND_EXPIRATION", True):
        if is_weekend(end):
            end = end.replace(days=-2)
            end = end.shift(days=-2)

    data["validity_end"] = end.isoformat()
    return data
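
# Illustrative note, not part of the commit: in current releases of the
# `arrow` library, relative offsets belong to shift(); replace() only sets
# absolute fields, which is why this change swaps the two calls.
#
#     arrow.utcnow().shift(years=+1)      # one year from now
#     arrow.utcnow().replace(year=2020)   # set the year field outright
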
@@ -169,7 +169,12 @@ def validate_schema(input_schema, output_schema):
            if not resp:
                return dict(message="No data found"), 404

            return unwrap_pagination(resp, output_schema), 200
            if callable(output_schema):
                output_schema_to_use = output_schema()
            else:
                output_schema_to_use = output_schema

            return unwrap_pagination(resp, output_schema_to_use), 200

        return decorated_function

@@ -1,8 +1,10 @@
from flask_script import Manager

import sys

from lemur.constants import SUCCESS_METRIC_STATUS
from lemur.dns_providers.service import get_all_dns_providers, set_domains
from lemur.extensions import metrics
from lemur.extensions import metrics, sentry
from lemur.plugins.base import plugins

manager = Manager(

@@ -19,13 +21,20 @@ def get_all_zones():
    dns_providers = get_all_dns_providers()
    acme_plugin = plugins.get("acme-issuer")

    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data = {
        "function": function,
        "message": "",
    }

    for dns_provider in dns_providers:
        try:
            zones = acme_plugin.get_all_zones(dns_provider)
            set_domains(dns_provider, zones)
        except Exception as e:
            print("[+] Error with DNS Provider {}: {}".format(dns_provider.name, e))
            set_domains(dns_provider, [])
            log_data["message"] = f"get all zones failed for {dns_provider} {e}."
            sentry.captureException(extra=log_data)

    status = SUCCESS_METRIC_STATUS

@@ -99,6 +99,7 @@ def get_types():
            },
            {"name": "dyn"},
            {"name": "ultradns"},
            {"name": "powerdns"},
        ]
    },
)

@@ -0,0 +1,101 @@
import sys
import dns
import dns.exception
import dns.name
import dns.query
import dns.resolver
import re

from lemur.extensions import sentry
from lemur.extensions import metrics


class DNSError(Exception):
    """Base class for DNS Exceptions."""
    pass


class BadDomainError(DNSError):
    """Error for when a Bad Domain Name is given."""

    def __init__(self, message):
        self.message = message


class DNSResolveError(DNSError):
    """Error for DNS Resolution Errors."""

    def __init__(self, message):
        self.message = message


def is_valid_domain(domain):
    """Checks if a domain is syntactically valid and returns a bool"""
    if len(domain) > 253:
        return False
    if domain[-1] == ".":
        domain = domain[:-1]
    fqdn_re = re.compile("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]{1,63}(?<!-)\.?)+(?:[a-zA-Z]{2,})$)", re.IGNORECASE)
    return all(fqdn_re.match(d) for d in domain.split("."))
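
# Illustrative examples, not part of the commit:
#     is_valid_domain("lemur.example.com")   # True
#     is_valid_domain("-bad-.example.com")   # False (label starts with a hyphen)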


def get_authoritative_nameserver(domain):
    """Get the authoritative nameservers for the given domain"""
    if not is_valid_domain(domain):
        raise BadDomainError(f"{domain} is not a valid FQDN")

    n = dns.name.from_text(domain)

    depth = 2
    default = dns.resolver.get_default_resolver()
    nameserver = default.nameservers[0]

    last = False
    while not last:
        s = n.split(depth)

        last = s[0].to_unicode() == u"@"
        sub = s[1]

        query = dns.message.make_query(sub, dns.rdatatype.NS)
        response = dns.query.udp(query, nameserver)

        rcode = response.rcode()
        if rcode != dns.rcode.NOERROR:
            function = sys._getframe().f_code.co_name
            metrics.send(f"{function}.error", "counter", 1)
            if rcode == dns.rcode.NXDOMAIN:
                raise DNSResolveError(f"{sub} does not exist.")
            else:
                raise DNSResolveError(f"Error: {dns.rcode.to_text(rcode)}")

        if len(response.authority) > 0:
            rrset = response.authority[0]
        else:
            rrset = response.answer[0]

        rr = rrset[0]
        if rr.rdtype != dns.rdatatype.SOA:
            authority = rr.target
            nameserver = default.query(authority).rrset[0].to_text()

        depth += 1

    return nameserver


def get_dns_records(domain, rdtype, nameserver):
    """Retrieves the DNS records matching the name and type and returns a list of records"""
    records = []
    try:
        dns_resolver = dns.resolver.Resolver()
        dns_resolver.nameservers = [nameserver]
        dns_response = dns_resolver.query(domain, rdtype)
        for rdata in dns_response:
            for record in rdata.strings:
                records.append(record.decode("utf-8"))
    except dns.exception.DNSException:
        sentry.captureException()
        function = sys._getframe().f_code.co_name
        metrics.send(f"{function}.fail", "counter", 1)
    return records

@@ -17,6 +17,7 @@ from flask_migrate import Migrate, MigrateCommand, stamp
from flask_script.commands import ShowUrls, Clean, Server

from lemur.dns_providers.cli import manager as dns_provider_manager
from lemur.acme_providers.cli import manager as acme_manager
from lemur.sources.cli import manager as source_manager
from lemur.policies.cli import manager as policy_manager
from lemur.reporting.cli import manager as report_manager

@@ -584,6 +585,7 @@ def main():
    manager.add_command("policy", policy_manager)
    manager.add_command("pending_certs", pending_certificate_manager)
    manager.add_command("dns_providers", dns_provider_manager)
    manager.add_command("acme", acme_manager)
    manager.run()

@@ -244,7 +244,7 @@ def render(args):
        )

    if time_range:
        to = arrow.now().replace(weeks=+time_range).format("YYYY-MM-DD")
        to = arrow.now().shift(weeks=+time_range).format("YYYY-MM-DD")
        now = arrow.now().format("YYYY-MM-DD")
        query = query.filter(PendingCertificate.not_after <= to).filter(
            PendingCertificate.not_after >= now

@@ -31,7 +31,8 @@ from lemur.exceptions import InvalidAuthority, InvalidConfiguration, UnknownProv
from lemur.extensions import metrics, sentry
from lemur.plugins import lemur_acme as acme
from lemur.plugins.bases import IssuerPlugin
from lemur.plugins.lemur_acme import cloudflare, dyn, route53, ultradns
from lemur.plugins.lemur_acme import cloudflare, dyn, route53, ultradns, powerdns
from retrying import retry


class AuthorizationRecord(object):

@@ -53,18 +54,30 @@ class AcmeHandler(object):
            current_app.logger.error(f"Unable to fetch DNS Providers: {e}")
            self.all_dns_providers = []

    def find_dns_challenge(self, host, authorizations):
    def get_dns_challenges(self, host, authorizations):
        """Get dns challenges for provided domain"""

        domain_to_validate, is_wildcard = self.strip_wildcard(host)
        dns_challenges = []
        for authz in authorizations:
            if not authz.body.identifier.value.lower() == host.lower():
            if not authz.body.identifier.value.lower() == domain_to_validate.lower():
                continue
            if is_wildcard and not authz.body.wildcard:
                continue
            if not is_wildcard and authz.body.wildcard:
                continue
            for combo in authz.body.challenges:
                if isinstance(combo.chall, challenges.DNS01):
                    dns_challenges.append(combo)

        return dns_challenges

    def maybe_remove_wildcard(self, host):
        return host.replace("*.", "")
    def strip_wildcard(self, host):
        """Removes the leading *. and returns Host and whether it was removed or not (True/False)"""
        prefix = "*."
        if host.startswith(prefix):
            return host[len(prefix):], True
        return host, False
|
||||
|
||||
def maybe_add_extension(self, host, dns_provider_options):
|
||||
if dns_provider_options and dns_provider_options.get(
|
||||
|
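For reference, the new strip_wildcard contract, as exercised by the tests further down (handler here stands for an existing AcmeHandler instance):

    handler.strip_wildcard("*.example.com")  # -> ("example.com", True)
    handler.strip_wildcard("example.com")    # -> ("example.com", False)
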
@@ -85,9 +98,8 @@ class AcmeHandler(object):
        current_app.logger.debug("Starting DNS challenge for {0}".format(host))

        change_ids = []

        host_to_validate = self.maybe_remove_wildcard(host)
        dns_challenges = self.find_dns_challenge(host_to_validate, order.authorizations)
        dns_challenges = self.get_dns_challenges(host, order.authorizations)
        host_to_validate, _ = self.strip_wildcard(host)
        host_to_validate = self.maybe_add_extension(
            host_to_validate, dns_provider_options
        )

@@ -171,7 +183,7 @@ class AcmeHandler(object):

        except (AcmeError, TimeoutError):
            sentry.captureException(extra={"order_url": str(order.uri)})
            metrics.send("request_certificate_error", "counter", 1)
            metrics.send("request_certificate_error", "counter", 1, metric_tags={"uri": order.uri})
            current_app.logger.error(
                f"Unable to resolve Acme order: {order.uri}", exc_info=True
            )

@@ -182,6 +194,11 @@ class AcmeHandler(object):
        else:
            raise

        metrics.send("request_certificate_success", "counter", 1, metric_tags={"uri": order.uri})
        current_app.logger.info(
            f"Successfully resolved Acme order: {order.uri}", exc_info=True
        )

        pem_certificate = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM,
            OpenSSL.crypto.load_certificate(

@@ -197,6 +214,7 @@ class AcmeHandler(object):
        )
        return pem_certificate, pem_certificate_chain

    @retry(stop_max_attempt_number=5, wait_fixed=5000)
    def setup_acme_client(self, authority):
        if not authority.options:
            raise InvalidAuthority("Invalid authority. Options not set")

@@ -252,8 +270,9 @@ class AcmeHandler(object):

        domains = [options["common_name"]]
        if options.get("extensions"):
            for name in options["extensions"]["sub_alt_names"]["names"]:
                domains.append(name)
            for dns_name in options["extensions"]["sub_alt_names"]["names"]:
                if dns_name.value not in domains:
                    domains.append(dns_name.value)

        current_app.logger.debug("Got these domains: {0}".format(domains))
        return domains

@@ -317,7 +336,7 @@ class AcmeHandler(object):
        )
        dns_provider_options = json.loads(dns_provider.credentials)
        account_number = dns_provider_options.get("account_id")
        host_to_validate = self.maybe_remove_wildcard(authz_record.host)
        host_to_validate, _ = self.strip_wildcard(authz_record.host)
        host_to_validate = self.maybe_add_extension(
            host_to_validate, dns_provider_options
        )

@@ -349,7 +368,7 @@ class AcmeHandler(object):
            dns_provider_options = json.loads(dns_provider.credentials)
            account_number = dns_provider_options.get("account_id")
            dns_challenges = authz_record.dns_challenge
            host_to_validate = self.maybe_remove_wildcard(authz_record.host)
            host_to_validate, _ = self.strip_wildcard(authz_record.host)
            host_to_validate = self.maybe_add_extension(
                host_to_validate, dns_provider_options
            )

@@ -375,6 +394,7 @@ class AcmeHandler(object):
            "dyn": dyn,
            "route53": route53,
            "ultradns": ultradns,
            "powerdns": powerdns
        }
        provider = provider_types.get(type)
        if not provider:

@@ -434,6 +454,7 @@ class ACMEIssuerPlugin(IssuerPlugin):
            "dyn": dyn,
            "route53": route53,
            "ultradns": ultradns,
            "powerdns": powerdns
        }
        provider = provider_types.get(type)
        if not provider:

@@ -636,15 +657,8 @@ class ACMEIssuerPlugin(IssuerPlugin):
        domains = self.acme.get_domains(issuer_options)
        if not create_immediately:
            # Create pending authorizations that we'll need to do the creation
            authz_domains = []
            for d in domains:
                if type(d) == str:
                    authz_domains.append(d)
                else:
                    authz_domains.append(d.value)

            dns_authorization = authorization_service.create(
                account_number, authz_domains, provider_type
                account_number, domains, provider_type
            )
            # Return id of the DNS Authorization
            return None, None, dns_authorization.id

@@ -0,0 +1,436 @@
import json
import sys
import time

import lemur.common.utils as utils
import lemur.dns_providers.util as dnsutil
import requests
from flask import current_app
from lemur.extensions import metrics, sentry

REQUIRED_VARIABLES = [
    "ACME_POWERDNS_APIKEYNAME",
    "ACME_POWERDNS_APIKEY",
    "ACME_POWERDNS_DOMAIN",
]


class Zone:
    """
    This class implements a PowerDNS zone in JSON.
    """

    def __init__(self, _data):
        self._data = _data

    @property
    def id(self):
        """ Zone id, has a trailing "." at the end, which we manually remove. """
        return self._data["id"][:-1]

    @property
    def name(self):
        """ Zone name, has a trailing "." at the end, which we manually remove. """
        return self._data["name"][:-1]

    @property
    def kind(self):
        """ Indicates whether the zone is set up as a PRIMARY or SECONDARY """
        return self._data["kind"]


class Record:
    """
    This class implements a PowerDNS record.
    """

    def __init__(self, _data):
        self._data = _data

    @property
    def name(self):
        return self._data["name"]

    @property
    def type(self):
        return self._data["type"]

    @property
    def ttl(self):
        return self._data["ttl"]

    @property
    def content(self):
        return self._data["content"]

    @property
    def disabled(self):
        return self._data["disabled"]


def get_zones(account_number):
    """
    Retrieve authoritative zones from the PowerDNS API and return a list of zones

    :param account_number:
    :raise: Exception
    :return: list of Zone objects
    """
    _check_conf()
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
    path = f"/api/v1/servers/{server_id}/zones"
    zones = []
    function = sys._getframe().f_code.co_name
    log_data = {
        "function": function
    }
    try:
        records = _get(path)
        log_data["message"] = "Retrieved Zones Successfully"
        current_app.logger.debug(log_data)

    except Exception as e:
        sentry.captureException()
        log_data["message"] = "Failed to Retrieve Zone Data"
        current_app.logger.debug(log_data)
        raise

    for record in records:
        zone = Zone(record)
        if zone.kind == 'Master':
            zones.append(zone.name)
    return zones


def create_txt_record(domain, token, account_number):
    """
    Create a TXT record for the given domain and token and return a change_id tuple

    :param domain: FQDN
    :param token: challenge value
    :param account_number:
    :return: tuple of domain/token
    """
    _check_conf()

    function = sys._getframe().f_code.co_name
    log_data = {
        "function": function,
        "fqdn": domain,
        "token": token,
    }

    # Create new record
    domain_id = domain + "."
    records = [Record({'name': domain_id, 'content': f"\"{token}\"", 'disabled': False})]

    # Get current records
    cur_records = _get_txt_records(domain)
    for record in cur_records:
        if record.content != token:
            records.append(record)

    try:
        _patch_txt_records(domain, account_number, records)
        log_data["message"] = "TXT record(s) successfully created"
        current_app.logger.debug(log_data)
    except Exception as e:
        sentry.captureException()
        log_data["Exception"] = e
        log_data["message"] = "Unable to create TXT record(s)"
        current_app.logger.debug(log_data)

    change_id = (domain, token)
    return change_id


def wait_for_dns_change(change_id, account_number=None):
    """
    Checks the authoritative DNS server to see if changes have propagated.

    :param change_id: tuple of domain/token
    :param account_number:
    :return:
    """
    _check_conf()
    domain, token = change_id
    number_of_attempts = current_app.config.get("ACME_POWERDNS_RETRIES", 3)
    zone_name = _get_zone_name(domain, account_number)
    nameserver = dnsutil.get_authoritative_nameserver(zone_name)
    record_found = False
    for attempts in range(0, number_of_attempts):
        txt_records = dnsutil.get_dns_records(domain, "TXT", nameserver)
        for txt_record in txt_records:
            if txt_record == token:
                record_found = True
                break
        if record_found:
            break
        time.sleep(10)

    function = sys._getframe().f_code.co_name
    log_data = {
        "function": function,
        "fqdn": domain,
        "status": record_found,
        "message": "Record status on PowerDNS authoritative server"
    }
    current_app.logger.debug(log_data)

    if record_found:
        metrics.send(f"{function}.success", "counter", 1, metric_tags={"fqdn": domain, "txt_record": token})
    else:
        metrics.send(f"{function}.fail", "counter", 1, metric_tags={"fqdn": domain, "txt_record": token})


def delete_txt_record(change_id, account_number, domain, token):
    """
    Delete the TXT record for the given domain and token

    :param change_id: tuple of domain/token
    :param account_number:
    :param domain: FQDN
    :param token: challenge to delete
    :return:
    """
    _check_conf()

    function = sys._getframe().f_code.co_name
    log_data = {
        "function": function,
        "fqdn": domain,
        "token": token,
    }

    """
    Get existing TXT records matching the domain from DNS
    The token to be deleted should already exist
    There may be other records with different tokens as well
    """
    cur_records = _get_txt_records(domain)
    found = False
    new_records = []
    for record in cur_records:
        if record.content == f"\"{token}\"":
            found = True
        else:
            new_records.append(record)

    # Since the matching token is not in DNS, there is nothing to delete
    if not found:
        log_data["message"] = "Unable to delete TXT record: Token not found in existing TXT records"
        current_app.logger.debug(log_data)
        return

    # The record to delete has been found AND there are other tokens set on the same domain
    # Since we only want to delete one token value from the RRSet, we need to use the PATCH command to
    # overwrite the current RRSet with the remaining records.
    elif new_records:
        try:
            _patch_txt_records(domain, account_number, new_records)
            log_data["message"] = "TXT record successfully deleted"
            current_app.logger.debug(log_data)
        except Exception as e:
            sentry.captureException()
            log_data["Exception"] = e
            log_data["message"] = "Unable to delete TXT record: patching exception"
            current_app.logger.debug(log_data)

    # The record to delete has been found AND there are no other token values set on the same domain
    # Use the DELETE command to delete the whole RRSet.
    else:
        zone_name = _get_zone_name(domain, account_number)
        server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
        zone_id = zone_name + "."
        domain_id = domain + "."
        path = f"/api/v1/servers/{server_id}/zones/{zone_id}"
        payload = {
            "rrsets": [
                {
                    "name": domain_id,
                    "type": "TXT",
                    "ttl": 300,
                    "changetype": "DELETE",
                    "records": [
                        {
                            "content": f"\"{token}\"",
                            "disabled": False
                        }
                    ],
                    "comments": []
                }
            ]
        }
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "fqdn": domain,
            "token": token
        }
        try:
            _patch(path, payload)
            log_data["message"] = "TXT record successfully deleted"
            current_app.logger.debug(log_data)
        except Exception as e:
            sentry.captureException()
            log_data["Exception"] = e
            log_data["message"] = "Unable to delete TXT record"
            current_app.logger.debug(log_data)


def _check_conf():
    """
    Verifies required configuration variables are set

    :return:
    """
    utils.validate_conf(current_app, REQUIRED_VARIABLES)


def _generate_header():
    """
    Generate a PowerDNS API header and return it as a dictionary

    :return: dict of header parameters
    """
    api_key_name = current_app.config.get("ACME_POWERDNS_APIKEYNAME")
    api_key = current_app.config.get("ACME_POWERDNS_APIKEY")
    headers = {api_key_name: api_key}
    return headers


def _get_zone_name(domain, account_number):
    """
    Get the most specific matching zone for the given domain and return it as a string

    :param domain: FQDN
    :param account_number:
    :return: FQDN of the zone
    """
    zones = get_zones(account_number)
    zone_name = ""
    for z in zones:
        if domain.endswith(z):
            if z.count(".") > zone_name.count("."):
                zone_name = z
    if not zone_name:
        function = sys._getframe().f_code.co_name
        log_data = {
            "function": function,
            "fqdn": domain,
            "message": "No PowerDNS zone name found.",
        }
        metrics.send(f"{function}.fail", "counter", 1)
    return zone_name


def _get_txt_records(domain):
    """
    Retrieve TXT records for a given domain and return a list of Record objects

    :param domain: FQDN
    :return: list of Record objects
    """
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")

    path = f"/api/v1/servers/{server_id}/search-data?q={domain}&max=100&object_type=record"
    function = sys._getframe().f_code.co_name
    log_data = {
        "function": function
    }
    try:
        records = _get(path)
        log_data["message"] = "Retrieved TXT Records Successfully"
        current_app.logger.debug(log_data)

    except Exception as e:
        sentry.captureException()
        log_data["Exception"] = e
        log_data["message"] = "Failed to Retrieve TXT Records"
        current_app.logger.debug(log_data)
        return []

    txt_records = []
    for record in records:
        cur_record = Record(record)
        txt_records.append(cur_record)
    return txt_records


def _get(path, params=None):
    """
    Execute a GET request on the given URL (base_uri + path) and return the response as a JSON object

    :param path: relative URL path
    :param params: additional parameters
    :return: json response
    """
    base_uri = current_app.config.get("ACME_POWERDNS_DOMAIN")
    verify_value = current_app.config.get("ACME_POWERDNS_VERIFY", True)
    resp = requests.get(
        f"{base_uri}{path}",
        headers=_generate_header(),
        params=params,
        verify=verify_value
    )
    resp.raise_for_status()
    return resp.json()


def _patch_txt_records(domain, account_number, records):
    """
    Send a PATCH request to the PowerDNS server

    :param domain: FQDN
    :param account_number:
    :param records: list of Record objects
    :return:
    """
    domain_id = domain + "."

    # Create records
    txt_records = []
    for record in records:
        txt_records.append(
            {'content': record.content, 'disabled': record.disabled}
        )

    # Create RRSet
    payload = {
        "rrsets": [
            {
                "name": domain_id,
                "type": "TXT",
                "ttl": 300,
                "changetype": "REPLACE",
                "records": txt_records,
                "comments": []
            }
        ]
    }

    # Create TXT records
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
    zone_name = _get_zone_name(domain, account_number)
    zone_id = zone_name + "."
    path = f"/api/v1/servers/{server_id}/zones/{zone_id}"
    _patch(path, payload)


def _patch(path, payload):
    """
    Execute a PATCH request on the given URL (base_uri + path) with the given payload

    :param path:
    :param payload:
    :return:
    """
    base_uri = current_app.config.get("ACME_POWERDNS_DOMAIN")
    verify_value = current_app.config.get("ACME_POWERDNS_VERIFY", True)
    resp = requests.patch(
        f"{base_uri}{path}",
        data=json.dumps(payload),
        headers=_generate_header(),
        verify=verify_value
    )
    resp.raise_for_status()

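A sketch of the Lemur configuration this plugin reads; the first three keys are required (see REQUIRED_VARIABLES above), the rest are optional with the defaults shown, and all values here are placeholders:

    ACME_POWERDNS_DOMAIN = "https://powerdns.example.com:8081"  # API base URI
    ACME_POWERDNS_APIKEYNAME = "X-API-Key"  # name of the API key header
    ACME_POWERDNS_APIKEY = "<secret>"       # value of the API key header
    ACME_POWERDNS_SERVERID = "localhost"    # optional, defaults to "localhost"
    ACME_POWERDNS_RETRIES = 3               # optional, defaults to 3
    ACME_POWERDNS_VERIFY = True             # optional, defaults to True
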
@@ -35,6 +35,7 @@ def get_zones(client=None):
    zones = []
    for page in paginator.paginate():
        for zone in page["HostedZones"]:
            if not zone["Config"]["PrivateZone"]:
                zones.append(
                    zone["Name"][:-1]
                )  # We need [:-1] to strip out the trailing dot.

@@ -1,9 +1,10 @@
import unittest
from requests.models import Response

from mock import MagicMock, Mock, patch
from unittest.mock import patch, Mock

from cryptography.x509 import DNSName
from lemur.plugins.lemur_acme import plugin, ultradns
from mock import MagicMock
from requests.models import Response


class TestAcme(unittest.TestCase):

@@ -21,11 +22,12 @@ class TestAcme(unittest.TestCase):
        }

    @patch("lemur.plugins.lemur_acme.plugin.len", return_value=1)
    def test_find_dns_challenge(self, mock_len):
    def test_get_dns_challenges(self, mock_len):
        assert mock_len

        from acme import challenges

        host = "example.com"
        c = challenges.DNS01()

        mock_authz = Mock()

@@ -33,9 +35,18 @@ class TestAcme(unittest.TestCase):
        mock_entry = Mock()
        mock_entry.chall = c
        mock_authz.body.resolved_combinations.append(mock_entry)
        result = yield self.acme.find_dns_challenge(mock_authz)
        result = yield self.acme.get_dns_challenges(host, mock_authz)
        self.assertEqual(result, mock_entry)

    def test_strip_wildcard(self):
        expected = ("example.com", False)
        result = self.acme.strip_wildcard("example.com")
        self.assertEqual(expected, result)

        expected = ("example.com", True)
        result = self.acme.strip_wildcard("*.example.com")
        self.assertEqual(expected, result)

    def test_authz_record(self):
        a = plugin.AuthorizationRecord("host", "authz", "challenge", "id")
        self.assertEqual(type(a), plugin.AuthorizationRecord)

@@ -43,9 +54,9 @@ class TestAcme(unittest.TestCase):
    @patch("acme.client.Client")
    @patch("lemur.plugins.lemur_acme.plugin.current_app")
    @patch("lemur.plugins.lemur_acme.plugin.len", return_value=1)
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.find_dns_challenge")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.get_dns_challenges")
    def test_start_dns_challenge(
        self, mock_find_dns_challenge, mock_len, mock_app, mock_acme
        self, mock_get_dns_challenges, mock_len, mock_app, mock_acme
    ):
        assert mock_len
        mock_order = Mock()

@@ -63,7 +74,7 @@ class TestAcme(unittest.TestCase):
        mock_dns_provider.create_txt_record = Mock(return_value=1)

        values = [mock_entry]
        iterable = mock_find_dns_challenge.return_value
        iterable = mock_get_dns_challenges.return_value
        iterator = iter(values)
        iterable.__iter__.return_value = iterator
        result = self.acme.start_dns_challenge(

@@ -74,12 +85,14 @@ class TestAcme(unittest.TestCase):
    @patch("acme.client.Client")
    @patch("lemur.plugins.lemur_acme.plugin.current_app")
    @patch("lemur.plugins.lemur_acme.cloudflare.wait_for_dns_change")
    @patch("time.sleep")
    def test_complete_dns_challenge_success(
        self, mock_wait_for_dns_change, mock_current_app, mock_acme
        self, mock_sleep, mock_wait_for_dns_change, mock_current_app, mock_acme
    ):
        mock_dns_provider = Mock()
        mock_dns_provider.wait_for_dns_change = Mock(return_value=True)
        mock_authz = Mock()
        mock_sleep.return_value = False
        mock_authz.dns_challenge.response = Mock()
        mock_authz.dns_challenge.response.simple_verify = Mock(return_value=True)
        mock_authz.authz = []

@@ -123,12 +136,12 @@ class TestAcme(unittest.TestCase):
    @patch("acme.client.Client")
    @patch("OpenSSL.crypto", return_value="mock_cert")
    @patch("josepy.util.ComparableX509")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.find_dns_challenge")
    @patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.get_dns_challenges")
    @patch("lemur.plugins.lemur_acme.plugin.current_app")
    def test_request_certificate(
        self,
        mock_current_app,
        mock_find_dns_challenge,
        mock_get_dns_challenges,
        mock_jose,
        mock_crypto,
        mock_acme,

@@ -168,7 +181,7 @@ class TestAcme(unittest.TestCase):
        assert result_client
        assert result_registration

    @patch("lemur.plugins.lemur_acme.plugin.current_app")
    @patch('lemur.plugins.lemur_acme.plugin.current_app')
    def test_get_domains_single(self, mock_current_app):
        options = {"common_name": "test.netflix.net"}
        result = self.acme.get_domains(options)

@@ -179,7 +192,7 @@ class TestAcme(unittest.TestCase):
        options = {
            "common_name": "test.netflix.net",
            "extensions": {
                "sub_alt_names": {"names": ["test2.netflix.net", "test3.netflix.net"]}
                "sub_alt_names": {"names": [DNSName("test2.netflix.net"), DNSName("test3.netflix.net")]}
            },
        }
        result = self.acme.get_domains(options)

@@ -187,6 +200,19 @@ class TestAcme(unittest.TestCase):
            result, [options["common_name"], "test2.netflix.net", "test3.netflix.net"]
        )

    @patch("lemur.plugins.lemur_acme.plugin.current_app")
    def test_get_domains_san(self, mock_current_app):
        options = {
            "common_name": "test.netflix.net",
            "extensions": {
                "sub_alt_names": {"names": [DNSName("test.netflix.net"), DNSName("test2.netflix.net")]}
            },
        }
        result = self.acme.get_domains(options)
        self.assertEqual(
            result, [options["common_name"], "test2.netflix.net"]
        )

    @patch(
        "lemur.plugins.lemur_acme.plugin.AcmeHandler.start_dns_challenge",
        return_value="test",

@@ -364,7 +390,7 @@ class TestAcme(unittest.TestCase):

    @patch("lemur.plugins.lemur_acme.ultradns.requests")
    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    def test_get_ultradns_token(self, mock_current_app, mock_requests):
    def test_ultradns_get_token(self, mock_current_app, mock_requests):
        # ret_val = json.dumps({"access_token": "access"})
        the_response = Response()
        the_response._content = b'{"access_token": "access"}'

@@ -374,7 +400,7 @@ class TestAcme(unittest.TestCase):
        self.assertTrue(len(result) > 0)

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    def test_create_txt_record(self, mock_current_app):
    def test_ultradns_create_txt_record(self, mock_current_app):
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"

@@ -395,7 +421,7 @@ class TestAcme(unittest.TestCase):

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def test_delete_txt_record(self, mock_metrics, mock_current_app):
    def test_ultradns_delete_txt_record(self, mock_metrics, mock_current_app):
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"

@@ -418,7 +444,7 @@ class TestAcme(unittest.TestCase):

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def test_wait_for_dns_change(self, mock_metrics, mock_current_app):
    def test_ultradns_wait_for_dns_change(self, mock_metrics, mock_current_app):
        ultradns._has_dns_propagated = Mock(return_value=True)
        nameserver = "1.1.1.1"
        ultradns.get_authoritative_nameserver = Mock(return_value=nameserver)

@@ -437,7 +463,7 @@ class TestAcme(unittest.TestCase):
        }
        mock_current_app.logger.debug.assert_called_with(log_data)

    def test_get_zone_name(self):
    def test_ultradns_get_zone_name(self):
        zones = ['example.com', 'test.example.com']
        zone = "test.example.com"
        domain = "_acme-challenge.test.example.com"

@@ -446,7 +472,7 @@ class TestAcme(unittest.TestCase):
        result = ultradns.get_zone_name(domain, account_number)
        self.assertEqual(result, zone)

    def test_get_zones(self):
    def test_ultradns_get_zones(self):
        account_number = "1234567890"
        path = "a/b/c"
        zones = ['example.com', 'test.example.com']

@@ -0,0 +1,171 @@
import unittest
from unittest.mock import patch, Mock
from lemur.plugins.lemur_acme import plugin, powerdns


class TestPowerdns(unittest.TestCase):
    @patch("lemur.plugins.lemur_acme.plugin.dns_provider_service")
    def setUp(self, mock_dns_provider_service):
        self.ACMEIssuerPlugin = plugin.ACMEIssuerPlugin()
        self.acme = plugin.AcmeHandler()
        mock_dns_provider = Mock()
        mock_dns_provider.name = "powerdns"
        mock_dns_provider.credentials = "{}"
        mock_dns_provider.provider_type = "powerdns"
        self.acme.dns_providers_for_domain = {
            "www.test.com": [mock_dns_provider],
            "test.fakedomain.net": [mock_dns_provider],
        }

    @patch("lemur.plugins.lemur_acme.powerdns.current_app")
    def test_get_zones(self, mock_current_app):
        account_number = "1234567890"
        path = "a/b/c"
        zones = ['example.com', 'test.example.com']
        get_response = [{'account': '', 'dnssec': 'False', 'id': 'example.com.', 'kind': 'Master', 'last_check': 0, 'masters': [],
                         'name': 'example.com.', 'notified_serial': '2019111907', 'serial': '2019111907',
                         'url': '/api/v1/servers/localhost/zones/example.com.'},
                        {'account': '', 'dnssec': 'False', 'id': 'bad.example.com.', 'kind': 'Secondary', 'last_check': 0, 'masters': [],
                         'name': 'bad.example.com.', 'notified_serial': '2018053104', 'serial': '2018053104',
                         'url': '/api/v1/servers/localhost/zones/bad.example.com.'},
                        {'account': '', 'dnssec': 'False', 'id': 'test.example.com.', 'kind': 'Master', 'last_check': 0,
                         'masters': [], 'name': 'test.example.com.', 'notified_serial': '2019112501', 'serial': '2019112501',
                         'url': '/api/v1/servers/localhost/zones/test.example.com.'}]
        powerdns._check_conf = Mock()
        powerdns._get = Mock(path)
        powerdns._get.side_effect = [get_response]
        mock_current_app.config.get = Mock(return_value="localhost")
        result = powerdns.get_zones(account_number)
        self.assertEqual(result, zones)

    def test_get_zone_name(self):
        zones = ['example.com', 'test.example.com']
        zone = "test.example.com"
        domain = "_acme-challenge.test.example.com"
        account_number = "1234567890"
        powerdns.get_zones = Mock(return_value=zones)
        result = powerdns._get_zone_name(domain, account_number)
        self.assertEqual(result, zone)

    @patch("lemur.plugins.lemur_acme.powerdns.current_app")
    def test_create_txt_record_write_only(self, mock_current_app):
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        powerdns._check_conf = Mock()
        powerdns._get_txt_records = Mock(return_value=[])
        powerdns._get_zone_name = Mock(return_value=zone)
        mock_current_app.logger.debug = Mock()
        mock_current_app.config.get = Mock(return_value="localhost")
        powerdns._patch = Mock()
        log_data = {
            "function": "create_txt_record",
            "fqdn": domain,
            "token": token,
            "message": "TXT record(s) successfully created"
        }
        result = powerdns.create_txt_record(domain, token, account_number)
        mock_current_app.logger.debug.assert_called_with(log_data)
        self.assertEqual(result, change_id)

    @patch("lemur.plugins.lemur_acme.powerdns.current_app")
    def test_create_txt_record_append(self, mock_current_app):
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        powerdns._check_conf = Mock()
        cur_token = "123456"
        cur_records = [powerdns.Record({'name': domain, 'content': f"\"{cur_token}\"", 'disabled': False})]
        powerdns._get_txt_records = Mock(return_value=cur_records)
        powerdns._get_zone_name = Mock(return_value=zone)
        mock_current_app.logger.debug = Mock()
        mock_current_app.config.get = Mock(return_value="localhost")
        powerdns._patch = Mock()
        log_data = {
            "function": "create_txt_record",
            "fqdn": domain,
            "token": token,
            "message": "TXT record(s) successfully created"
        }
        expected_path = f"/api/v1/servers/localhost/zones/test.example.com."
        expected_payload = {
            "rrsets": [
                {
                    "name": domain + ".",
                    "type": "TXT",
                    "ttl": 300,
                    "changetype": "REPLACE",
                    "records": [
                        {
                            "content": f"\"{token}\"",
                            "disabled": False
                        },
                        {
                            "content": f"\"{cur_token}\"",
                            "disabled": False
                        }
                    ],
                    "comments": []
                }
            ]
        }

        result = powerdns.create_txt_record(domain, token, account_number)
        mock_current_app.logger.debug.assert_called_with(log_data)
        powerdns._patch.assert_called_with(expected_path, expected_payload)
        self.assertEqual(result, change_id)

    @patch("lemur.plugins.lemur_acme.powerdns.dnsutil")
    @patch("lemur.plugins.lemur_acme.powerdns.current_app")
    @patch("lemur.extensions.metrics")
    @patch("time.sleep")
    def test_wait_for_dns_change(self, mock_sleep, mock_metrics, mock_current_app, mock_dnsutil):
        domain = "_acme-challenge.test.example.com"
        token1 = "ABCDEFG"
        token2 = "HIJKLMN"
        zone_name = "test.example.com"
        nameserver = "1.1.1.1"
        change_id = (domain, token1)
        powerdns._check_conf = Mock()
        mock_records = (token2, token1)
        mock_current_app.config.get = Mock(return_value=1)
        powerdns._get_zone_name = Mock(return_value=zone_name)
        mock_dnsutil.get_authoritative_nameserver = Mock(return_value=nameserver)
        mock_dnsutil.get_dns_records = Mock(return_value=mock_records)
        mock_sleep.return_value = False
        mock_metrics.send = Mock()
        mock_current_app.logger.debug = Mock()
        powerdns.wait_for_dns_change(change_id)

        log_data = {
            "function": "wait_for_dns_change",
            "fqdn": domain,
            "status": True,
            "message": "Record status on PowerDNS authoritative server"
        }
        mock_current_app.logger.debug.assert_called_with(log_data)

    @patch("lemur.plugins.lemur_acme.powerdns.current_app")
    def test_delete_txt_record(self, mock_current_app):
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        powerdns._check_conf = Mock()
        powerdns._get_zone_name = Mock(return_value=zone)
        mock_current_app.logger.debug = Mock()
        mock_current_app.config.get = Mock(return_value="localhost")
        powerdns._patch = Mock()
        log_data = {
            "function": "delete_txt_record",
            "fqdn": domain,
            "token": token,
            "message": "Unable to delete TXT record: Token not found in existing TXT records"
        }
        powerdns.delete_txt_record(change_id, account_number, domain, token)
        mock_current_app.logger.debug.assert_called_with(log_data)

@@ -46,7 +46,7 @@ class ADCSIssuerPlugin(IssuerPlugin):
        )
        current_app.logger.info("Requesting CSR: {0}".format(csr))
        current_app.logger.info("Issuer options: {0}".format(issuer_options))
        cert, req_id = (
        cert = (
            ca_server.get_cert(csr, adcs_template, encoding="b64")
            .decode("utf-8")
            .replace("\r\n", "\n")

@@ -54,7 +54,7 @@ class ADCSIssuerPlugin(IssuerPlugin):
        chain = (
            ca_server.get_ca_cert(encoding="b64").decode("utf-8").replace("\r\n", "\n")
        )
        return cert, chain, req_id
        return cert, chain, None

    def revoke_certificate(self, certificate, comments):
        raise NotImplementedError("Not implemented\n", self, certificate, comments)

@@ -0,0 +1,4 @@
try:
    VERSION = __import__("pkg_resources").get_distribution(__name__).version
except Exception as e:
    VERSION = "unknown"

@@ -0,0 +1,97 @@
"""
.. module: lemur.plugins.lemur_atlas_redis.plugin
    :platform: Unix
    :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
    :license: Apache, see LICENSE for more details.

.. moduleauthor:: Jay Zarfoss
"""

from redis import Redis
import json
from datetime import datetime

from flask import current_app
from lemur.plugins import lemur_atlas as atlas
from lemur.plugins.bases.metric import MetricPlugin


def millis_since_epoch():
    """
    current time since epoch in milliseconds
    """
    epoch = datetime.utcfromtimestamp(0)
    delta = datetime.now() - epoch
    return int(delta.total_seconds() * 1000.0)


class AtlasMetricRedisPlugin(MetricPlugin):
    title = "AtlasRedis"
    slug = "atlas-metric-redis"
    description = "Adds support for sending key metrics to Atlas via local Redis"
    version = atlas.VERSION

    author = "Jay Zarfoss"
    author_url = "https://github.com/netflix/lemur"

    options = [
        {
            "name": "redis_host",
            "type": "str",
            "required": False,
            "help_message": "If no host is provided localhost is assumed",
            "default": "localhost",
        },
        {"name": "redis_port", "type": "int", "required": False, "default": 28527},
    ]

    metric_data = {}
    redis_host = None
    redis_port = None

    def submit(
        self, metric_name, metric_type, metric_value, metric_tags=None, options=None
    ):
        if not options:
            options = self.options

        valid_types = ["COUNTER", "GAUGE", "TIMER"]
        if metric_type.upper() not in valid_types:
            raise Exception(
                "Invalid Metric Type for Atlas: '{metric}' choose from: {options}".format(
                    metric=metric_type, options=",".join(valid_types)
                )
            )

        if metric_tags:
            if not isinstance(metric_tags, dict):
                raise Exception(
                    "Invalid Metric Tags for Atlas: Tags must be in dict format"
                )

        self.metric_data["timestamp"] = millis_since_epoch()
        self.metric_data["type"] = metric_type.upper()
        self.metric_data["name"] = str(metric_name)
        self.metric_data["tags"] = metric_tags

        if (
            metric_value == "NaN"
            or isinstance(metric_value, int)
            or isinstance(metric_value, float)
        ):
            self.metric_data["value"] = metric_value
        else:
            raise Exception("Invalid Metric Value for Atlas: Metric must be a number")

        self.redis_host = self.get_option("redis_host", options)
        self.redis_port = self.get_option("redis_port", options)

        try:
            r = Redis(host=self.redis_host, port=self.redis_port, socket_timeout=0.1)
            r.rpush('atlas-agent', json.dumps(self.metric_data))
        except Exception as e:
            current_app.logger.warning(
                "AtlasMetricsRedis: exception [{exception}] could not post atlas metrics to AtlasRedis [{host}:{port}], metric [{metricdata}]".format(
                    exception=e, host=self.redis_host, port=self.redis_port, metricdata=json.dumps(self.metric_data)
                )
            )

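A minimal usage sketch, assuming a Redis instance is listening on the configured host/port with an Atlas agent consuming the 'atlas-agent' list:

    metric_plugin = AtlasMetricRedisPlugin()
    metric_plugin.submit("certificate_issued", "counter", 1, metric_tags={"ca": "digicert"})
    # the JSON-encoded metric is RPUSHed onto the 'atlas-agent' list for the local agent
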
@@ -10,7 +10,7 @@ import botocore

from retrying import retry

from lemur.extensions import metrics
from lemur.extensions import metrics, sentry
from lemur.plugins.lemur_aws.sts import sts_client

@@ -24,6 +24,12 @@ def retry_throttled(exception):
        if exception.response["Error"]["Code"] == "NoSuchEntity":
            return False

        # No need to retry deletion requests if there is a DeleteConflict error.
        # This error indicates that the certificate is still attached to an entity
        # and cannot be deleted.
        if exception.response["Error"]["Code"] == "DeleteConflict":
            return False

    metrics.send("iam_retry", "counter", 1, metric_tags={"exception": str(exception)})
    return True

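retry_throttled is written as a predicate for the retrying library; a sketch of wrapping an IAM call with it (the function name and wait/attempt numbers here are illustrative):

    @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=5)
    def _delete_cert(name, **kwargs):
        # retried on throttling, but not on NoSuchEntity or DeleteConflict
        client = kwargs.pop("client")
        client.delete_server_certificate(ServerCertificateName=name)
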
@@ -122,9 +128,11 @@ def get_certificate(name, **kwargs):
    """
    client = kwargs.pop("client")
    metrics.send("get_certificate", "counter", 1, metric_tags={"name": name})
    return client.get_server_certificate(ServerCertificateName=name)[
        "ServerCertificate"
    ]
    try:
        return client.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]
    except client.exceptions.NoSuchEntityException:
        sentry.captureException()
        return None


@sts_client("iam")

@@ -32,7 +32,9 @@
.. moduleauthor:: Mikhail Khodorovskiy <mikhail.khodorovskiy@jivesoftware.com>
.. moduleauthor:: Harm Weites <harm@weites.com>
"""
from acme.errors import ClientError
from flask import current_app
from lemur.extensions import sentry, metrics

from lemur.plugins import lemur_aws as aws
from lemur.plugins.bases import DestinationPlugin, ExportDestinationPlugin, SourcePlugin

@@ -40,7 +42,12 @@ from lemur.plugins.lemur_aws import iam, s3, elb, ec2


def get_region_from_dns(dns):
    # XXX.REGION.elb.amazonaws.com
    if dns.endswith(".elb.amazonaws.com"):
        return dns.split(".")[-4]
    else:
        # NLBs have a different pattern on the dns XXXX.elb.REGION.amazonaws.com
        return dns.split(".")[-3]


def format_elb_cipher_policy_v2(policy):

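The two DNS shapes the helper distinguishes (hostnames are illustrative):

    get_region_from_dns("my-elb-123456.us-east-1.elb.amazonaws.com")  # classic ELB -> "us-east-1"
    get_region_from_dns("my-nlb-abcdef.elb.eu-west-1.amazonaws.com")  # NLB -> "eu-west-1"
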
@@ -205,26 +212,28 @@ class AWSSourcePlugin(SourcePlugin):
        if not regions:
            regions = ec2.get_regions(account_number=account_number)
        else:
            regions = regions.split(",")
            regions = "".join(regions.split()).split(",")

        for region in regions:
            elbs = elb.get_all_elbs(account_number=account_number, region=region)
            current_app.logger.info(
                "Describing classic load balancers in {0}-{1}".format(
                    account_number, region
                )
            )
            current_app.logger.info({
                "message": "Describing classic load balancers",
                "account_number": account_number,
                "region": region,
                "number_of_load_balancers": len(elbs)
            })

            for e in elbs:
                endpoints.extend(get_elb_endpoints(account_number, region, e))

            # fetch advanced ELBs
            elbs_v2 = elb.get_all_elbs_v2(account_number=account_number, region=region)
            current_app.logger.info(
                "Describing advanced load balancers in {0}-{1}".format(
                    account_number, region
                )
            )
            current_app.logger.info({
                "message": "Describing advanced load balancers",
                "account_number": account_number,
                "region": region,
                "number_of_load_balancers": len(elbs_v2)
            })

            for e in elbs_v2:
                endpoints.extend(get_elb_endpoints_v2(account_number, region, e))

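The new parsing strips any whitespace around the comma-separated region option before splitting, e.g.:

    regions = "us-east-1, us-west-2 ,eu-west-1"
    "".join(regions.split()).split(",")  # -> ['us-east-1', 'us-west-2', 'eu-west-1']
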
@@ -266,6 +275,29 @@ class AWSSourcePlugin(SourcePlugin):
        account_number = self.get_option("accountNumber", options)
        iam.delete_cert(certificate.name, account_number=account_number)

    def get_certificate_by_name(self, certificate_name, options):
        account_number = self.get_option("accountNumber", options)
        # certificate name may contain path, in which case we remove it
        if "/" in certificate_name:
            certificate_name = certificate_name.split('/')[-1]
        try:
            cert = iam.get_certificate(certificate_name, account_number=account_number)
            if cert:
                return dict(
                    body=cert["CertificateBody"],
                    chain=cert.get("CertificateChain"),
                    name=cert["ServerCertificateMetadata"]["ServerCertificateName"],
                )
        except ClientError:
            current_app.logger.warning(
                "get_elb_certificate_failed: Unable to get certificate for {0}".format(certificate_name))
            sentry.captureException()
            metrics.send(
                "get_elb_certificate_failed", "counter", 1,
                metric_tags={"certificate_name": certificate_name, "account_number": account_number}
            )
        return None


class AWSDestinationPlugin(DestinationPlugin):
    title = "AWS"

@@ -295,6 +327,7 @@ class AWSDestinationPlugin(DestinationPlugin):
    ]

    def upload(self, name, body, private_key, cert_chain, options, **kwargs):
        try:
            iam.upload_cert(
                name,
                body,

@@ -303,6 +336,8 @@ class AWSDestinationPlugin(DestinationPlugin):
                cert_chain=cert_chain,
                account_number=self.get_option("accountNumber", options),
            )
        except ClientError:
            sentry.captureException()

    def deploy(self, elb_name, account, region, certificate):
        pass

@@ -56,7 +56,7 @@ class CfsslIssuerPlugin(IssuerPlugin):
        try:
            hex_key = current_app.config.get("CFSSL_KEY")
            key = bytes.fromhex(hex_key)
        except (ValueError, NameError):
        except (ValueError, NameError, TypeError):
            # unable to find CFSSL_KEY in config, continue using normal sign method
            pass
        else:

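CFSSL_KEY is consumed as a hex-encoded key; a sketch of producing a compatible value (the generation method is illustrative, not prescribed by the plugin):

    import secrets
    hex_key = secrets.token_bytes(32).hex()  # store this as CFSSL_KEY in the Lemur config
    key = bytes.fromhex(hex_key)             # what the plugin recovers at runtime
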
@@ -14,21 +14,17 @@
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import json

import arrow
import requests

import pem
from retrying import retry

from flask import current_app

import requests
from cryptography import x509

from lemur.extensions import metrics
from flask import current_app
from lemur.common.utils import validate_conf
from lemur.plugins.bases import IssuerPlugin, SourcePlugin

from lemur.extensions import metrics
from lemur.plugins import lemur_digicert as digicert
from lemur.plugins.bases import IssuerPlugin, SourcePlugin
from retrying import retry


def log_status_code(r, *args, **kwargs):

@@ -64,24 +60,37 @@ def signature_hash(signing_algorithm):
    raise Exception("Unsupported signing algorithm.")


def determine_validity_years(end_date):
def determine_validity_years(years):
    """Given an end date determine how many years into the future that date is.

    :param years:
    :return: validity in years
    """
    default_years = current_app.config.get("DIGICERT_DEFAULT_VALIDITY", 1)
    max_years = current_app.config.get("DIGICERT_MAX_VALIDITY", default_years)

    if years > max_years:
        return max_years
    if years not in [1, 2, 3]:
        return default_years
    return years


def determine_end_date(end_date):
    """
    Determine appropriate end date

    :param end_date:
    :return: str validity in years
    :return: validity_end
    """
    now = arrow.utcnow()
    default_years = current_app.config.get("DIGICERT_DEFAULT_VALIDITY", 1)
    max_validity_end = arrow.utcnow().shift(years=current_app.config.get("DIGICERT_MAX_VALIDITY", default_years))

    if end_date < now.replace(years=+1):
        return 1
    elif end_date < now.replace(years=+2):
        return 2
    elif end_date < now.replace(years=+3):
        return 3
    if not end_date:
        end_date = arrow.utcnow().shift(years=default_years)

    raise Exception(
        "DigiCert issued certificates cannot exceed three" " years in validity"
    )
    if end_date > max_validity_end:
        end_date = max_validity_end
    return end_date


def get_additional_names(options):

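A worked example of the new capping logic, assuming DIGICERT_DEFAULT_VALIDITY = 1 and DIGICERT_MAX_VALIDITY = 2 (the values the tests below use):

    determine_validity_years(1)  # -> 1, within the configured maximum
    determine_validity_years(3)  # -> 2, capped at DIGICERT_MAX_VALIDITY
    determine_validity_years(0)  # -> 1, falls back to DIGICERT_DEFAULT_VALIDITY
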
@@ -107,12 +116,6 @@ def map_fields(options, csr):
    :param csr:
    :return: dict or valid DigiCert options
    """
    if not options.get("validity_years"):
        if not options.get("validity_end"):
            options["validity_years"] = current_app.config.get(
                "DIGICERT_DEFAULT_VALIDITY", 1
            )

    data = dict(
        certificate={
            "common_name": options["common_name"],

@@ -125,9 +128,11 @@ def map_fields(options, csr):
        data["certificate"]["dns_names"] = get_additional_names(options)

    if options.get("validity_years"):
        data["validity_years"] = options["validity_years"]
        data["validity_years"] = determine_validity_years(options.get("validity_years"))
    elif options.get("validity_end"):
        data["custom_expiration_date"] = determine_end_date(options.get("validity_end")).format("YYYY-MM-DD")
    else:
        data["custom_expiration_date"] = options["validity_end"].format("YYYY-MM-DD")
        data["validity_years"] = determine_validity_years(0)

    if current_app.config.get("DIGICERT_PRIVATE", False):
        if "product" in data:

@@ -144,18 +149,15 @@ def map_cis_fields(options, csr):

    :param options:
    :param csr:
    :return:
    :return: data
    """
    if not options.get("validity_years"):
        if not options.get("validity_end"):
            options["validity_end"] = arrow.utcnow().replace(
                years=current_app.config.get("DIGICERT_DEFAULT_VALIDITY", 1)
            )
        options["validity_years"] = determine_validity_years(options["validity_end"])

    if options.get("validity_years"):
        validity_end = determine_end_date(arrow.utcnow().shift(years=options["validity_years"]))
    elif options.get("validity_end"):
        validity_end = determine_end_date(options.get("validity_end"))
    else:
        options["validity_end"] = arrow.utcnow().replace(
            years=options["validity_years"]
        )
        validity_end = determine_end_date(False)

    data = {
        "profile_name": current_app.config.get("DIGICERT_CIS_PROFILE_NAMES", {}).get(options['authority'].name),

@@ -164,13 +166,17 @@ def map_cis_fields(options, csr):
        "csr": csr,
        "signature_hash": signature_hash(options.get("signing_algorithm")),
        "validity": {
            "valid_to": options["validity_end"].format("YYYY-MM-DDTHH:MM") + "Z"
            "valid_to": validity_end.format("YYYY-MM-DDTHH:MM") + "Z"
        },
        "organization": {
            "name": options["organization"],
            "units": [options["organizational_unit"]],
        },
    }
    # possibility to default to a SIGNING_ALGORITHM for a given profile
    if current_app.config.get("DIGICERT_CIS_SIGNING_ALGORITHMS", {}).get(options['authority'].name):
        data["signature_hash"] = current_app.config.get("DIGICERT_CIS_SIGNING_ALGORITHMS", {}).get(
            options['authority'].name)

    return data

@@ -1,20 +1,81 @@
import pytest
import arrow
import json
from unittest.mock import patch
from unittest.mock import patch, Mock

import arrow
import pytest
from cryptography import x509
from freezegun import freeze_time

from lemur.plugins.lemur_digicert import plugin
from lemur.tests.vectors import CSR_STR

from cryptography import x509

def config_mock(*args):
    values = {
        "DIGICERT_ORG_ID": 111111,
        "DIGICERT_PRIVATE": False,
        "DIGICERT_DEFAULT_SIGNING_ALGORITHM": "sha256",
        "DIGICERT_DEFAULT_VALIDITY": 1,
        "DIGICERT_MAX_VALIDITY": 2,
        "DIGICERT_CIS_PROFILE_NAMES": {"digicert": 'digicert'},
        "DIGICERT_CIS_SIGNING_ALGORITHMS": {"digicert": 'digicert'},
    }
    return values[args[0]]


def test_map_fields_with_validity_end_and_start(app):
    from lemur.plugins.lemur_digicert.plugin import map_fields
@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_determine_validity_years(mock_current_app):
    mock_current_app.config.get = Mock(return_value=2)
    assert plugin.determine_validity_years(1) == 1
    assert plugin.determine_validity_years(0) == 2
    assert plugin.determine_validity_years(3) == 2


@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_determine_end_date(mock_current_app):
    mock_current_app.config.get = Mock(return_value=2)
    with freeze_time(time_to_freeze=arrow.get(2016, 11, 3).datetime):
        assert arrow.get(2018, 11, 3) == plugin.determine_end_date(0)
        assert arrow.get(2018, 5, 7) == plugin.determine_end_date(arrow.get(2018, 5, 7))
        assert arrow.get(2018, 11, 3) == plugin.determine_end_date(arrow.get(2020, 5, 7))


@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_map_fields_with_validity_years(mock_current_app):
    mock_current_app.config.get = Mock(side_effect=config_mock)

    with patch('lemur.plugins.lemur_digicert.plugin.signature_hash') as mock_signature_hash:
        mock_signature_hash.return_value = "sha256"

        names = [u"one.example.com", u"two.example.com", u"three.example.com"]
        options = {
            "common_name": "example.com",
            "owner": "bob@example.com",
            "description": "test certificate",
            "extensions": {"sub_alt_names": {"names": [x509.DNSName(x) for x in names]}},
            "validity_years": 2
        }
        expected = {
            "certificate": {
                "csr": CSR_STR,
                "common_name": "example.com",
                "dns_names": names,
                "signature_hash": "sha256",
            },
            "organization": {"id": 111111},
            "validity_years": 2,
        }
        assert expected == plugin.map_fields(options, CSR_STR)


@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_map_fields_with_validity_end_and_start(mock_current_app):
    mock_current_app.config.get = Mock(side_effect=config_mock)
    plugin.determine_end_date = Mock(return_value=arrow.get(2017, 5, 7))

    with patch('lemur.plugins.lemur_digicert.plugin.signature_hash') as mock_signature_hash:
        mock_signature_hash.return_value = "sha256"

        names = [u"one.example.com", u"two.example.com", u"three.example.com"]
        options = {
            "common_name": "example.com",
            "owner": "bob@example.com",

@@ -24,9 +85,7 @@ def test_map_fields_with_validity_end_and_start(app):
            "validity_start": arrow.get(2016, 10, 30),
        }

        data = map_fields(options, CSR_STR)

        assert data == {
        expected = {
            "certificate": {
                "csr": CSR_STR,
                "common_name": "example.com",

@@ -37,66 +96,18 @@ def test_map_fields_with_validity_end_and_start(app):
            "custom_expiration_date": arrow.get(2017, 5, 7).format("YYYY-MM-DD"),
        }

        assert expected == plugin.map_fields(options, CSR_STR)


def test_map_fields_with_validity_years(app):
    from lemur.plugins.lemur_digicert.plugin import map_fields

@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_map_cis_fields_with_validity_years(mock_current_app, authority):
    mock_current_app.config.get = Mock(side_effect=config_mock)
    plugin.determine_end_date = Mock(return_value=arrow.get(2018, 11, 3))

    with patch('lemur.plugins.lemur_digicert.plugin.signature_hash') as mock_signature_hash:
        mock_signature_hash.return_value = "sha256"

        names = [u"one.example.com", u"two.example.com", u"three.example.com"]

        options = {
            "common_name": "example.com",
            "owner": "bob@example.com",
            "description": "test certificate",
            "extensions": {"sub_alt_names": {"names": [x509.DNSName(x) for x in names]}},
            "validity_years": 2,
            "validity_end": arrow.get(2017, 10, 30),
        }

        data = map_fields(options, CSR_STR)

        assert data == {
            "certificate": {
                "csr": CSR_STR,
                "common_name": "example.com",
                "dns_names": names,
                "signature_hash": "sha256",
            },
            "organization": {"id": 111111},
            "validity_years": 2,
        }


def test_map_cis_fields(app, authority):
    from lemur.plugins.lemur_digicert.plugin import map_cis_fields

    names = [u"one.example.com", u"two.example.com", u"three.example.com"]

    options = {
        "common_name": "example.com",
        "owner": "bob@example.com",
        "description": "test certificate",
        "extensions": {"sub_alt_names": {"names": [x509.DNSName(x) for x in names]}},
        "organization": "Example, Inc.",
        "organizational_unit": "Example Org",
        "validity_end": arrow.get(2017, 5, 7),
        "validity_start": arrow.get(2016, 10, 30),
        "authority": authority,
    }

    data = map_cis_fields(options, CSR_STR)

    assert data == {
        "common_name": "example.com",
        "csr": CSR_STR,
        "additional_dns_names": names,
        "signature_hash": "sha256",
        "organization": {"name": "Example, Inc.", "units": ["Example Org"]},
        "validity": {
            "valid_to": arrow.get(2017, 5, 7).format("YYYY-MM-DDTHH:MM") + "Z"
        },
        "profile_name": None,
    }

    options = {
        "common_name": "example.com",
        "owner": "bob@example.com",

@@ -108,10 +119,7 @@ def test_map_cis_fields(app, authority):
        "authority": authority,
    }

    with freeze_time(time_to_freeze=arrow.get(2016, 11, 3).datetime):
        data = map_cis_fields(options, CSR_STR)

    assert data == {
    expected = {
        "common_name": "example.com",
        "csr": CSR_STR,
        "additional_dns_names": names,

@ -123,17 +131,55 @@ def test_map_cis_fields(app, authority):
|
|||
"profile_name": None,
|
||||
}
|
||||
|
||||
assert expected == plugin.map_cis_fields(options, CSR_STR)
|
||||
|
||||
def test_signature_hash(app):
|
||||
from lemur.plugins.lemur_digicert.plugin import signature_hash
|
||||
|
||||
assert signature_hash(None) == "sha256"
|
||||
assert signature_hash("sha256WithRSA") == "sha256"
|
||||
assert signature_hash("sha384WithRSA") == "sha384"
|
||||
assert signature_hash("sha512WithRSA") == "sha512"
|
||||
@patch("lemur.plugins.lemur_digicert.plugin.current_app")
|
||||
def test_map_cis_fields_with_validity_end_and_start(mock_current_app, app, authority):
|
||||
mock_current_app.config.get = Mock(side_effect=config_mock)
|
||||
plugin.determine_end_date = Mock(return_value=arrow.get(2017, 5, 7))
|
||||
|
||||
with patch('lemur.plugins.lemur_digicert.plugin.signature_hash') as mock_signature_hash:
|
||||
mock_signature_hash.return_value = "sha256"
|
||||
|
||||
names = [u"one.example.com", u"two.example.com", u"three.example.com"]
|
||||
options = {
|
||||
"common_name": "example.com",
|
||||
"owner": "bob@example.com",
|
||||
"description": "test certificate",
|
||||
"extensions": {"sub_alt_names": {"names": [x509.DNSName(x) for x in names]}},
|
||||
"organization": "Example, Inc.",
|
||||
"organizational_unit": "Example Org",
|
||||
"validity_end": arrow.get(2017, 5, 7),
|
||||
"validity_start": arrow.get(2016, 10, 30),
|
||||
"authority": authority
|
||||
}
|
||||
|
||||
expected = {
|
||||
"common_name": "example.com",
|
||||
"csr": CSR_STR,
|
||||
"additional_dns_names": names,
|
||||
"signature_hash": "sha256",
|
||||
"organization": {"name": "Example, Inc.", "units": ["Example Org"]},
|
||||
"validity": {
|
||||
"valid_to": arrow.get(2017, 5, 7).format("YYYY-MM-DDTHH:MM") + "Z"
|
||||
},
|
||||
"profile_name": None,
|
||||
}
|
||||
|
||||
assert expected == plugin.map_cis_fields(options, CSR_STR)
|
||||
|
||||
|
||||
@patch("lemur.plugins.lemur_digicert.plugin.current_app")
|
||||
def test_signature_hash(mock_current_app, app):
|
||||
mock_current_app.config.get = Mock(side_effect=config_mock)
|
||||
assert plugin.signature_hash(None) == "sha256"
|
||||
assert plugin.signature_hash("sha256WithRSA") == "sha256"
|
||||
assert plugin.signature_hash("sha384WithRSA") == "sha384"
|
||||
assert plugin.signature_hash("sha512WithRSA") == "sha512"
|
||||
|
||||
with pytest.raises(Exception):
|
||||
signature_hash("sdfdsf")
|
||||
plugin.signature_hash("sdfdsf")
|
||||
|
||||
|
||||
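These tests pass config_mock as a side_effect so that every current_app.config.get call is answered without a real Flask app. The helper itself sits outside this hunk; a minimal sketch of what such a side_effect function might look like (key names and values here are illustrative, not the repository's actual fixture):

    from unittest.mock import Mock

    def config_mock(key, default=None):
        # side_effect receives the same arguments as config.get(key, default)
        values = {
            "DIGICERT_ORG_ID": 111111,  # assumed key name for the org id used above
            "DIGICERT_DEFAULT_SIGNING_ALGORITHM": "sha256",
        }
        return values.get(key, default)

    mock_get = Mock(side_effect=config_mock)

With Mock(side_effect=config_mock), each call such as config.get("DIGICERT_ORG_ID") is routed through the function, so the tests can control configuration per key rather than with a single return_value.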
def test_issuer_plugin_create_certificate(
@@ -106,7 +106,13 @@
</tr>
<tr>
<td style="font-family:Roboto-Regular,Helvetica,Arial,sans-serif;font-size:13px;color:#202020;line-height:1.5">
If the above certificates are still in use. You should re-issue and deploy new certificates as soon as possible.</span>
Your action is required if the above certificates are still needed for your service.
<br><br>
If your endpoints are still in use, you can access your certificate in Lemur, and enable Auto Rotate under the Action->Edit menu.
Lemur will take care of re-issuance and rotation of the certificate on the listed endpoints within one day.
<br><br>
If your certificate is deployed with your service, you should re-issue and manually deploy a new certificate as soon as possible.
</span>
</td>
</tr>
<tr>
@@ -96,7 +96,7 @@ def build_secret(secret_format, secret_name, body, private_key, cert_chain):
if secret_format == "TLS":
secret["type"] = "kubernetes.io/tls"
secret["data"] = {
"tls.crt": base64encode(cert_chain),
"tls.crt": base64encode(body),
"tls.key": base64encode(private_key),
}
if secret_format == "Certificate":
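The fix above makes "tls.crt" carry the certificate body instead of the chain, which is what kubernetes.io/tls secrets expect. A minimal sketch of building such a secret (function and argument names are illustrative; Lemur's build_secret also handles other formats):

    import base64

    def base64encode(pem):
        # Kubernetes secret values are base64-encoded strings
        return base64.b64encode(pem.encode("utf-8")).decode("utf-8")

    def build_tls_secret(name, body, private_key, cert_chain=None):
        # tls.crt holds the leaf certificate, optionally followed by the chain;
        # putting only the chain there (the old behavior) yields an unusable secret
        crt = body if not cert_chain else body + "\n" + cert_chain
        return {
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {"name": name},
            "type": "kubernetes.io/tls",
            "data": {
                "tls.crt": base64encode(crt),
                "tls.key": base64encode(private_key),
            },
        }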
@@ -170,6 +170,15 @@ class SFTPDestinationPlugin(DestinationPlugin):
current_app.logger.debug(
"Uploading {0} to {1}".format(filename, dst_path_cn)
)
try:
with sftp.open(dst_path_cn + "/" + filename, "w") as f:
f.write(data)
except (PermissionError) as permerror:
if permerror.errno == 13:
current_app.logger.debug(
"Uploading {0} to {1} returned Permission Denied Error, making file writable and retrying".format(filename, dst_path_cn)
)
sftp.chmod(dst_path_cn + "/" + filename, 0o600)
with sftp.open(dst_path_cn + "/" + filename, "w") as f:
f.write(data)
# read only for owner, -r--------
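The retry above keys on the literal value 13; errno.EACCES is the same condition spelled symbolically. The pattern in isolation, sketched with an assumed paramiko SFTP client and a hypothetical helper name:

    import errno

    def write_with_permission_retry(sftp, path, data):
        try:
            with sftp.open(path, "w") as f:
                f.write(data)
        except PermissionError as permerror:
            if permerror.errno != errno.EACCES:  # EACCES == 13, permission denied
                raise
            sftp.chmod(path, 0o600)  # make the file owner-writable, then retry once
            with sftp.open(path, "w") as f:
                f.write(data)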
@@ -50,11 +50,19 @@ class VaultSourcePlugin(SourcePlugin):
"helpMessage": "Version of the Vault KV API to use",
},
{
"name": "vaultAuthTokenFile",
"name": "authenticationMethod",
"type": "select",
"value": "token",
"available": ["token", "kubernetes"],
"required": True,
"helpMessage": "Authentication method to use",
},
{
"name": "tokenFile/VaultRole",
"type": "str",
"required": True,
"validation": "(/[^/]+)+",
"helpMessage": "Must be a valid file path!",
"validation": "^([a-zA-Z0-9/._-]+/?)+$",
"helpMessage": "Must be a valid file path for token-based auth and a valid role if k8s-based auth",
},
{
"name": "vaultMount",
@@ -85,7 +93,8 @@ class VaultSourcePlugin(SourcePlugin):
cert = []
body = ""
url = self.get_option("vaultUrl", options)
token_file = self.get_option("vaultAuthTokenFile", options)
auth_method = self.get_option("authenticationMethod", options)
auth_key = self.get_option("tokenFile/vaultRole", options)
mount = self.get_option("vaultMount", options)
path = self.get_option("vaultPath", options)
obj_name = self.get_option("objectName", options)
@@ -93,10 +102,18 @@ class VaultSourcePlugin(SourcePlugin):
cert_filter = "-----BEGIN CERTIFICATE-----"
cert_delimiter = "-----END CERTIFICATE-----"

with open(token_file, "r") as tfile:
client = hvac.Client(url=url)
if auth_method == 'token':
with open(auth_key, "r") as tfile:
token = tfile.readline().rstrip("\n")
client.token = token

if auth_method == 'kubernetes':
token_path = '/var/run/secrets/kubernetes.io/serviceaccount/token'
with open(token_path, 'r') as f:
jwt = f.read()
client.auth_kubernetes(auth_key, jwt)

client = hvac.Client(url=url, token=token)
client.secrets.kv.default_kv_version = api_version

path = "{0}/{1}".format(path, obj_name)
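Both branches above produce an authenticated hvac client; only the credential source differs. A condensed sketch of that shared shape (helper name is hypothetical; the JWT path mirrors the diff, and hvac's auth_kubernetes call was the current API at the time of this change):

    import hvac

    def build_vault_client(url, auth_method, auth_key):
        # auth_key is a token-file path for 'token' auth,
        # or a Vault role name for 'kubernetes' auth
        client = hvac.Client(url=url)
        if auth_method == 'token':
            with open(auth_key, "r") as tfile:
                client.token = tfile.readline().rstrip("\n")
        elif auth_method == 'kubernetes':
            # every pod gets its service-account JWT mounted at a fixed path
            with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f:
                client.auth_kubernetes(auth_key, f.read())
        return client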
@@ -160,11 +177,19 @@ class VaultDestinationPlugin(DestinationPlugin):
"helpMessage": "Version of the Vault KV API to use",
},
{
"name": "vaultAuthTokenFile",
"name": "authenticationMethod",
"type": "select",
"value": "token",
"available": ["token", "kubernetes"],
"required": True,
"helpMessage": "Authentication method to use",
},
{
"name": "tokenFile/VaultRole",
"type": "str",
"required": True,
"validation": "(/[^/]+)+",
"helpMessage": "Must be a valid file path!",
"validation": "^([a-zA-Z0-9/._-]+/?)+$",
"helpMessage": "Must be a valid file path for token-based auth and a valid role if k8s-based auth",
},
{
"name": "vaultMount",
@@ -219,7 +244,8 @@ class VaultDestinationPlugin(DestinationPlugin):
cname = common_name(parse_certificate(body))

url = self.get_option("vaultUrl", options)
token_file = self.get_option("vaultAuthTokenFile", options)
auth_method = self.get_option("authenticationMethod", options)
auth_key = self.get_option("tokenFile/vaultRole", options)
mount = self.get_option("vaultMount", options)
path = self.get_option("vaultPath", options)
bundle = self.get_option("bundleChain", options)
@@ -245,10 +271,18 @@ class VaultDestinationPlugin(DestinationPlugin):
exc_info=True,
)

with open(token_file, "r") as tfile:
client = hvac.Client(url=url)
if auth_method == 'token':
with open(auth_key, "r") as tfile:
token = tfile.readline().rstrip("\n")
client.token = token

if auth_method == 'kubernetes':
token_path = '/var/run/secrets/kubernetes.io/serviceaccount/token'
with open(token_path, 'r') as f:
jwt = f.read()
client.auth_kubernetes(auth_key, jwt)

client = hvac.Client(url=url, token=token)
client.secrets.kv.default_kv_version = api_version

if obj_name:
@@ -98,10 +98,14 @@ def process_options(options):
:param options:
:return: dict or valid verisign options
"""
# if there is a config variable with VERISIGN_PRODUCT_<upper(authority.name)> take the value as Cert product-type
# else default to "Server", to be compatible with former versions
authority = options.get("authority").name.upper()
product_type = current_app.config.get("VERISIGN_PRODUCT_{0}".format(authority), "Server")
data = {
"challenge": get_psuedo_random_string(),
"serverType": "Apache",
"certProductType": "Server",
"certProductType": product_type,
"firstName": current_app.config.get("VERISIGN_FIRST_NAME"),
"lastName": current_app.config.get("VERISIGN_LAST_NAME"),
"signatureAlgorithm": "sha256WithRSAEncryption",
@@ -111,16 +115,9 @@ def process_options(options):

data["subject_alt_names"] = ",".join(get_additional_names(options))

if options.get("validity_end") > arrow.utcnow().replace(years=2):
raise Exception(
"Verisign issued certificates cannot exceed two years in validity"
)

if options.get("validity_end"):
# VeriSign (Symantec) only accepts strictly smaller than 2 year end date
if options.get("validity_end") < arrow.utcnow().replace(years=2).replace(
days=-1
):
if options.get("validity_end") < arrow.utcnow().shift(years=2, days=-1):
period = get_default_issuance(options)
data["specificEndDate"] = options["validity_end"].format("MM/DD/YYYY")
data["validityPeriod"] = period
@@ -149,9 +146,9 @@ def get_default_issuance(options):
"""
now = arrow.utcnow()

if options["validity_end"] < now.replace(years=+1):
if options["validity_end"] < now.shift(years=+1):
validity_period = "1Y"
elif options["validity_end"] < now.replace(years=+2):
elif options["validity_end"] < now.shift(years=+2):
validity_period = "2Y"
else:
raise Exception(
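The .replace(years=...) to .shift(years=...) changes throughout this file track arrow's API: plural units in replace() were deprecated in favor of shift() for relative arithmetic, with replace() reserved for setting absolute fields. For example:

    import arrow

    now = arrow.utcnow()
    deadline = now.shift(years=2, days=-1)              # relative: now + 2 years - 1 day
    midnight = now.replace(hour=0, minute=0, second=0)  # absolute: set fields in place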
@@ -212,7 +209,7 @@ class VerisignIssuerPlugin(IssuerPlugin):

response = self.session.post(url, data=data)
try:
cert = handle_response(response.content)["Response"]["Certificate"]
response_dict = handle_response(response.content)
except KeyError:
metrics.send(
"verisign_create_certificate_error",
@@ -224,8 +221,13 @@ class VerisignIssuerPlugin(IssuerPlugin):
extra={"common_name": issuer_options.get("common_name", "")}
)
raise Exception(f"Error with Verisign: {response.content}")
# TODO add external id
return cert, current_app.config.get("VERISIGN_INTERMEDIATE"), None
authority = issuer_options.get("authority").name.upper()
cert = response_dict['Response']['Certificate']
external_id = None
if 'Transaction_ID' in response_dict['Response'].keys():
external_id = response_dict['Response']['Transaction_ID']
chain = current_app.config.get("VERISIGN_INTERMEDIATE_{0}".format(authority), current_app.config.get("VERISIGN_INTERMEDIATE"))
return cert, chain, external_id

@staticmethod
def create_authority(options):
@@ -261,7 +263,7 @@ class VerisignIssuerPlugin(IssuerPlugin):
url = current_app.config.get("VERISIGN_URL") + "/reportingws"

end = arrow.now()
start = end.replace(days=-7)
start = end.shift(days=-7)

data = {
"reportType": "detail",
@@ -299,7 +301,7 @@ class VerisignSourcePlugin(SourcePlugin):
def get_certificates(self):
url = current_app.config.get("VERISIGN_URL") + "/reportingws"
end = arrow.now()
start = end.replace(years=-5)
start = end.shift(years=-5)
data = {
"reportType": "detail",
"startDate": start.format("MM/DD/YYYY"),
@@ -54,6 +54,24 @@ def validate_sources(source_strings):
return sources


def execute_clean(plugin, certificate, source):
try:
plugin.clean(certificate, source.options)
certificate.sources.remove(source)

# If we want to remove the source from the certificate, we also need to clear any equivalent destinations to
# prevent Lemur from re-uploading the certificate.
for destination in certificate.destinations:
if destination.label == source.label:
certificate.destinations.remove(destination)

certificate_service.database.update(certificate)
return SUCCESS_METRIC_STATUS
except Exception as e:
current_app.logger.exception(e)
sentry.captureException()
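One subtlety worth noting: execute_clean returns SUCCESS_METRIC_STATUS on success but falls off the end of its except branch, so a failed clean returns None. The callers below initialize status = FAILURE_METRIC_STATUS but then overwrite it with this return value, so a failure ends up tagged as None in the metric rather than as the failure constant. A variant that makes the failure value explicit (a sketch, not what the diff does):

    def execute_clean(plugin, certificate, source):
        try:
            plugin.clean(certificate, source.options)
            certificate.sources.remove(source)
            certificate_service.database.update(certificate)
            return SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.exception(e)
            sentry.captureException()
            return FAILURE_METRIC_STATUS  # explicit instead of an implicit None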

@manager.option(
"-s",
"--sources",
@@ -132,11 +150,9 @@ def clean(source_strings, commit):
s = plugins.get(source.plugin_name)

if not hasattr(s, "clean"):
print(
"Cannot clean source: {0}, source plugin does not implement 'clean()'".format(
source.label
)
)
info_text = f"Cannot clean source: {source.label}, source plugin does not implement 'clean()'"
current_app.logger.warning(info_text)
print(info_text)
continue

start_time = time.time()
@@ -144,35 +160,147 @@ def clean(source_strings, commit):
print("[+] Starting to clean source: {label}!\n".format(label=source.label))

cleaned = 0
for certificate in certificate_service.get_all_pending_cleaning(source):
certificates = certificate_service.get_all_pending_cleaning_expired(source)
for certificate in certificates:
status = FAILURE_METRIC_STATUS
if commit:
try:
s.clean(certificate, source.options)
certificate.sources.remove(source)
certificate_service.database.update(certificate)
status = SUCCESS_METRIC_STATUS
except Exception as e:
current_app.logger.exception(e)
sentry.captureException()
status = execute_clean(s, certificate, source)

metrics.send(
"clean",
"certificate_clean",
"counter",
1,
metric_tags={"source": source.label, "status": status},
metric_tags={"status": status, "source": source.label, "certificate": certificate.name},
)

current_app.logger.warning(
"Removed {0} from source {1} during cleaning".format(
certificate.name, source.label
)
)

current_app.logger.warning(f"Removed {certificate.name} from source {source.label} during cleaning")
cleaned += 1

print(
"[+] Finished cleaning source: {label}. Removed {cleaned} certificates from source. Run Time: {time}\n".format(
label=source.label, time=(time.time() - start_time), cleaned=cleaned
info_text = f"[+] Finished cleaning source: {source.label}. " \
f"Removed {cleaned} certificates from source. " \
f"Run Time: {(time.time() - start_time)}\n"
print(info_text)
current_app.logger.warning(info_text)
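The command below, and clean_unused_and_issued_since_days further on, repeat the scaffold of clean almost verbatim; only the certificate query differs. A possible consolidation, sketched for illustration only (the diff itself keeps the three commands separate):

    def _clean_source(s, source, certificates, commit):
        # shared body: iterate, optionally commit, emit one metric per certificate
        cleaned = 0
        for certificate in certificates:
            status = FAILURE_METRIC_STATUS
            if commit:
                status = execute_clean(s, certificate, source)
            metrics.send(
                "certificate_clean", "counter", 1,
                metric_tags={"status": status, "source": source.label,
                             "certificate": certificate.name},
            )
            cleaned += 1
        return cleaned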
@manager.option(
"-s",
"--sources",
dest="source_strings",
action="append",
help="Sources to operate on.",
)
@manager.option(
"-d",
"--days",
dest="days_to_expire",
type=int,
action="store",
required=True,
help="The expiry range within days.",
)
@manager.option(
"-c",
"--commit",
dest="commit",
action="store_true",
default=False,
help="Persist changes.",
)
def clean_unused_and_expiring_within_days(source_strings, days_to_expire, commit):
sources = validate_sources(source_strings)
for source in sources:
s = plugins.get(source.plugin_name)

if not hasattr(s, "clean"):
info_text = f"Cannot clean source: {source.label}, source plugin does not implement 'clean()'"
current_app.logger.warning(info_text)
print(info_text)
continue

start_time = time.time()

print("[+] Starting to clean source: {label}!\n".format(label=source.label))

cleaned = 0
certificates = certificate_service.get_all_pending_cleaning_expiring_in_days(source, days_to_expire)
for certificate in certificates:
status = FAILURE_METRIC_STATUS
if commit:
status = execute_clean(s, certificate, source)

metrics.send(
"certificate_clean",
"counter",
1,
metric_tags={"status": status, "source": source.label, "certificate": certificate.name},
)
current_app.logger.warning(f"Removed {certificate.name} from source {source.label} during cleaning")
cleaned += 1

info_text = f"[+] Finished cleaning source: {source.label}. " \
f"Removed {cleaned} certificates from source. " \
f"Run Time: {(time.time() - start_time)}\n"
print(info_text)
current_app.logger.warning(info_text)


@manager.option(
"-s",
"--sources",
dest="source_strings",
action="append",
help="Sources to operate on.",
)
@manager.option(
"-d",
"--days",
dest="days_since_issuance",
type=int,
action="store",
required=True,
help="Days since issuance.",
)
@manager.option(
"-c",
"--commit",
dest="commit",
action="store_true",
default=False,
help="Persist changes.",
)
def clean_unused_and_issued_since_days(source_strings, days_since_issuance, commit):
sources = validate_sources(source_strings)
for source in sources:
s = plugins.get(source.plugin_name)

if not hasattr(s, "clean"):
info_text = f"Cannot clean source: {source.label}, source plugin does not implement 'clean()'"
current_app.logger.warning(info_text)
print(info_text)
continue

start_time = time.time()

print("[+] Starting to clean source: {label}!\n".format(label=source.label))

cleaned = 0
certificates = certificate_service.get_all_pending_cleaning_issued_since_days(source, days_since_issuance)
for certificate in certificates:
status = FAILURE_METRIC_STATUS
if commit:
status = execute_clean(s, certificate, source)

metrics.send(
"certificate_clean",
"counter",
1,
metric_tags={"status": status, "source": source.label, "certificate": certificate.name},
)
current_app.logger.warning(f"Removed {certificate.name} from source {source.label} during cleaning")
cleaned += 1

info_text = f"[+] Finished cleaning source: {source.label}. " \
f"Removed {cleaned} certificates from source. " \
f"Run Time: {(time.time() - start_time)}\n"
print(info_text)
current_app.logger.warning(info_text)
@@ -15,7 +15,7 @@ from lemur.sources.models import Source
from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service
from lemur.endpoints import service as endpoint_service
from lemur.extensions import metrics
from lemur.extensions import metrics, sentry
from lemur.destinations import service as destination_service

from lemur.certificates.schemas import CertificateUploadInputSchema
@@ -66,7 +66,7 @@ def sync_update_destination(certificate, source):


def sync_endpoints(source):
new, updated = 0, 0
new, updated, updated_by_hash = 0, 0, 0
current_app.logger.debug("Retrieving endpoints from {0}".format(source.label))
s = plugins.get(source.plugin_name)

@@ -78,7 +78,7 @@ def sync_endpoints(source):
source.label
)
)
return new, updated
return new, updated, updated_by_hash

for endpoint in endpoints:
exists = endpoint_service.get_by_dnsname_and_port(
@@ -89,15 +89,53 @@ def sync_endpoints(source):

endpoint["certificate"] = certificate_service.get_by_name(certificate_name)

# if get cert by name failed, we attempt a search via serial number and hash comparison
# and link the endpoint certificate to Lemur certificate
if not endpoint["certificate"]:
certificate_attached_to_endpoint = None
try:
certificate_attached_to_endpoint = s.get_certificate_by_name(certificate_name, source.options)
except NotImplementedError:
current_app.logger.warning(
"Unable to describe server certificate for endpoints in source {0}:"
" plugin has not implemented 'get_certificate_by_name'".format(
source.label
)
)
sentry.captureException()

if certificate_attached_to_endpoint:
lemur_matching_cert, updated_by_hash_tmp = find_cert(certificate_attached_to_endpoint)
updated_by_hash += updated_by_hash_tmp

if lemur_matching_cert:
endpoint["certificate"] = lemur_matching_cert[0]

if len(lemur_matching_cert) > 1:
current_app.logger.error(
"Certificate Not Found. Name: {0} Endpoint: {1}".format(
certificate_name, endpoint["name"]
"Too Many Certificates Found{0}. Name: {1} Endpoint: {2}".format(
len(lemur_matching_cert), certificate_name, endpoint["name"]
)
)
metrics.send("endpoint.certificate.conflict",
"gauge", len(lemur_matching_cert),
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options)})

if not endpoint["certificate"]:
current_app.logger.error({
"message": "Certificate Not Found",
"certificate_name": certificate_name,
"endpoint_name": endpoint["name"],
"dns_name": endpoint.get("dnsname"),
"account": s.get_option("accountNumber", source.options),
})

metrics.send("endpoint.certificate.not.found",
"counter", 1,
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"], "acct": s.get_option("accountNumber", source.options)})
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options),
"dnsname": endpoint.get("dnsname")})
continue

policy = endpoint.pop("policy")
@@ -122,18 +160,11 @@ def sync_endpoints(source):
endpoint_service.update(exists.id, **endpoint)
updated += 1

return new, updated
return new, updated, updated_by_hash


# TODO this is very slow as we don't batch update certificates
def sync_certificates(source, user):
new, updated = 0, 0

current_app.logger.debug("Retrieving certificates from {0}".format(source.label))
s = plugins.get(source.plugin_name)
certificates = s.get_certificates(source.options)

for certificate in certificates:
def find_cert(certificate):
updated_by_hash = 0
exists = False

if certificate.get("search", None):
@@ -152,12 +183,32 @@ def sync_certificates(source, user):
cert = parse_certificate(certificate["body"])
matching_serials = certificate_service.get_by_serial(serial(cert))
exists = find_matching_certificates_by_hash(cert, matching_serials)
updated_by_hash += 1

exists = [x for x in exists if x]
return exists, updated_by_hash
# TODO this is very slow as we don't batch update certificates
def sync_certificates(source, user):
new, updated, updated_by_hash = 0, 0, 0

current_app.logger.debug("Retrieving certificates from {0}".format(source.label))
s = plugins.get(source.plugin_name)
certificates = s.get_certificates(source.options)

# emitting the count of certificates on the source
metrics.send("sync_certificates_count",
"gauge", len(certificates),
metric_tags={"source": source.label})

for certificate in certificates:
exists, updated_by_hash = find_cert(certificate)

if not certificate.get("owner"):
certificate["owner"] = user.email

certificate["creator"] = user
exists = [x for x in exists if x]

if not exists:
certificate_create(certificate, source)
@@ -172,12 +223,20 @@ def sync_certificates(source, user):
certificate_update(e, source)
updated += 1

return new, updated
return new, updated, updated_by_hash


def sync(source, user):
new_certs, updated_certs = sync_certificates(source, user)
new_endpoints, updated_endpoints = sync_endpoints(source)
new_certs, updated_certs, updated_certs_by_hash = sync_certificates(source, user)
new_endpoints, updated_endpoints, updated_endpoints_by_hash = sync_endpoints(source)

metrics.send("sync.updated_certs_by_hash",
"gauge", updated_certs_by_hash,
metric_tags={"source": source.label})

metrics.send("sync.updated_endpoints_by_hash",
"gauge", updated_endpoints_by_hash,
metric_tags={"source": source.label})

source.last_run = arrow.utcnow()
database.update(source)
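Note that in the sync_certificates loop above, exists, updated_by_hash = find_cert(certificate) rebinds updated_by_hash on every iteration, so the sync.updated_certs_by_hash gauge reflects only the last certificate examined rather than a running total; sync_endpoints, by contrast, accumulates with updated_by_hash += updated_by_hash_tmp. An accumulating variant of the loop body would read (a sketch):

    for certificate in certificates:
        exists, updated_by_hash_tmp = find_cert(certificate)
        updated_by_hash += updated_by_hash_tmp  # accumulate across the loop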
@@ -371,4 +371,12 @@ angular.module('lemur')
});
});
};
})
.controller('CertificateInfoController', function ($scope, CertificateApi) {
$scope.fetchFullCertificate = function (certId) {
CertificateApi.get(certId).then(function (certificate) {
$scope.certificate = certificate;
});
};
})
;
@@ -133,16 +133,13 @@
</div>
<div class="form-group" ng-hide="certificate.authority.plugin.slug == 'acme-issuer'">
<label class="control-label col-sm-2"
uib-tooltip="If no date is selected Lemur attempts to issue a 2 year certificate">
uib-tooltip="If no date is selected Lemur attempts to issue a 1 year certificate">
Validity Range <span class="glyphicon glyphicon-question-sign"></span>
</label>
<div class="col-sm-2">
<select ng-model="certificate.validityYears" class="form-control">
<option value="">-</option>
<option value="1">1 year</option>
<option value="2">2 years</option>
<option value="3">3 years</option>
<option value="4">4 years</option>
</select>
</div>
<span style="padding-top: 15px" class="text-center col-sm-1">
@@ -11,7 +11,7 @@ angular.module('lemur')
controller: 'CertificatesViewController'
})
.state('certificate', {
url: '/certificates/:name',
url: '/certificates/:fixedName', // use "fixedName" in the URL to indicate a 'like' query can be avoided
templateUrl: '/angular/certificates/view/view.tpl.html',
controller: 'CertificatesViewController'
});
@@ -28,6 +28,7 @@ angular.module('lemur')
sorting: {
id: 'desc' // initial sorting
},
short: true,
filter: $scope.filter
}, {
total: 0, // length of data
@@ -54,6 +55,7 @@ angular.module('lemur')
sorting: {
id: 'desc' // initial sorting
},
short: true,
filter: $scope.filter
}, {
getData: function ($defer, params) {
@@ -52,7 +52,7 @@
</td>
<td data-title="''" style="text-align: center; vertical-align: middle;">
<div class="btn-group pull-right" role="group" aria-label="...">
<a class="btn btn-sm btn-primary" ui-sref="certificate({name: certificate.name})">Permalink</a>
<a class="btn btn-sm btn-primary" ui-sref="certificate({fixedName: certificate.name})">Permalink</a>
<button ng-model="certificate.toggle" class="btn btn-sm btn-info" uib-btn-checkbox btn-checkbox-true="1"
btn-checkbox-false="0">More
</button>
@@ -71,7 +71,7 @@
</div>
</td>
</tr>
<tr class="warning" ng-if="certificate.toggle" ng-repeat-end>
<tr class="warning" ng-if="certificate.toggle" ng-controller="CertificateInfoController" ng-init="fetchFullCertificate(certificate.id)" ng-repeat-end>
<td colspan="12">
<uib-tabset justified="true" class="col-md-8">
<uib-tab>
@@ -9,7 +9,8 @@ from cryptography import x509
from cryptography.hazmat.backends import default_backend
from marshmallow import ValidationError
from freezegun import freeze_time
from mock import patch
# from mock import patch
from unittest.mock import patch

from lemur.certificates.service import create_csr
from lemur.certificates.views import * # noqa
@@ -906,12 +907,12 @@ def test_certificate_get_body(client):
assert response_body["serial"] == "211983098819107449768450703123665283596"
assert response_body["serialHex"] == "9F7A75B39DAE4C3F9524C68B06DA6A0C"
assert response_body["distinguishedName"] == (
"CN=LemurTrust Unittests Class 1 CA 2018,"
"O=LemurTrust Enterprises Ltd,"
"OU=Unittesting Operations Center,"
"C=EE,"
"L=Earth,"
"ST=N/A,"
"L=Earth"
"C=EE,"
"OU=Unittesting Operations Center,"
"O=LemurTrust Enterprises Ltd,"
"CN=LemurTrust Unittests Class 1 CA 2018"
)
@@ -0,0 +1,12 @@
import unittest
from lemur.dns_providers import util as dnsutil


class TestDNSProvider(unittest.TestCase):
def test_is_valid_domain(self):
self.assertTrue(dnsutil.is_valid_domain("example.com"))
self.assertTrue(dnsutil.is_valid_domain("foo.bar.org"))
self.assertTrue(dnsutil.is_valid_domain("_acme-chall.example.com"))
self.assertFalse(dnsutil.is_valid_domain("e/xample.com"))
self.assertFalse(dnsutil.is_valid_domain("exam\ple.com"))
self.assertFalse(dnsutil.is_valid_domain("*.example.com"))
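The assertions pin down the accepted grammar: dotted labels, a leading underscore for ACME challenge records, and no slashes, backslashes, or wildcards. A regex-based validator consistent with these cases (a sketch; the actual implementation in lemur.dns_providers.util may differ):

    import re

    _DOMAIN_RE = re.compile(
        r"^(_?[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$"
    )

    def is_valid_domain(domain):
        # each label may start with '_' (e.g. _acme-chall), otherwise
        # letters/digits with optional interior hyphens; TLD is alphabetic
        return _DOMAIN_RE.match(domain) is not None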
@@ -10,11 +10,11 @@ def test_convert_validity_years(session):
data = convert_validity_years(dict(validity_years=2))

assert data["validity_start"] == arrow.utcnow().isoformat()
assert data["validity_end"] == arrow.utcnow().replace(years=+2).isoformat()
assert data["validity_end"] == arrow.utcnow().shift(years=+2).isoformat()

with freeze_time("2015-01-10"):
data = convert_validity_years(dict(validity_years=1))
assert (
data["validity_end"]
== arrow.utcnow().replace(years=+1, days=-2).isoformat()
== arrow.utcnow().shift(years=+1, days=-2).isoformat()
)
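freeze_time pins "now" so that relative date arithmetic in the assertion is deterministic; without it, a test that evaluates arrow.utcnow() twice could straddle a clock tick and flake. A minimal standalone example of the same pattern:

    import arrow
    from freezegun import freeze_time

    with freeze_time("2015-01-10"):
        start = arrow.utcnow()  # always 2015-01-10T00:00:00+00:00 inside the block
        end = start.shift(years=+1, days=-2)
        assert end.isoformat() == arrow.utcnow().shift(years=+1, days=-2).isoformat()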
package.json
@@ -6,8 +6,8 @@
"url": "git://github.com/netflix/lemur.git"
},
"dependencies": {
"bower": "^1.8.2",
"browser-sync": "^2.3.1",
"bower": "^1.8.8",
"browser-sync": "^2.26.7",
"del": "^2.2.2",
"gulp-autoprefixer": "^3.1.1",
"gulp-cache": "^0.4.5",
@@ -17,19 +17,19 @@
"gulp-flatten": "^0.3.1",
"gulp-foreach": "0.1.0",
"gulp-if": "^2.0.2",
"gulp-imagemin": "^3.1.1",
"gulp-imagemin": "^7.1.0",
"gulp-inject": "~4.1.0",
"gulp-jshint": "^2.0.4",
"gulp-less": "^3.0.3",
"gulp-less": "^4.0.1",
"gulp-load-plugins": "^1.4.0",
"gulp-minify-css": "^1.2.4",
"gulp-minify-html": "~1.0.6",
"gulp-ng-annotate": "~2.0.0",
"gulp-ng-html2js": "~0.2.2",
"gulp-ng-html2js": "^0.2.3",
"gulp-notify": "^2.2.0",
"gulp-plumber": "^1.1.0",
"gulp-print": "^2.0.1",
"gulp-protractor": "3.0.0",
"gulp-protractor": "^4.1.1",
"gulp-replace": "~0.5.3",
"gulp-replace-task": "~0.11.0",
"gulp-rev": "^7.1.2",
@@ -41,7 +41,7 @@
"gulp-util": "^3.0.1",
"http-proxy": "~1.16.2",
"jshint-stylish": "^2.2.1",
"karma": "~1.3.0",
"karma": "^4.4.1",
"karma-jasmine": "^1.1.0",
"main-bower-files": "^2.13.1",
"merge-stream": "^1.0.1",
@@ -60,7 +60,7 @@
},
"devDependencies": {
"gulp": "^3.9.1",
"jshint": "^2.8.0",
"jshint": "^2.11.0",
"karma-chrome-launcher": "^2.0.0"
}
}
@@ -4,36 +4,42 @@
#
# pip-compile --no-index --output-file=requirements-dev.txt requirements-dev.in
#
aspy.yaml==1.3.0 # via pre-commit
bleach==3.1.0 # via readme-renderer
certifi==2019.6.16 # via requests
cfgv==2.0.0 # via pre-commit
appdirs==1.4.3 # via virtualenv
bleach==3.1.4 # via readme-renderer
certifi==2020.4.5.1 # via requests
cffi==1.14.0 # via cryptography
cfgv==3.1.0 # via pre-commit
chardet==3.0.4 # via requests
docutils==0.14 # via readme-renderer
flake8==3.5.0
identify==1.4.5 # via pre-commit
idna==2.8 # via requests
importlib-metadata==0.18 # via pre-commit
invoke==1.2.0
cryptography==2.9.2 # via secretstorage
distlib==0.3.0 # via virtualenv
docutils==0.16 # via readme-renderer
filelock==3.0.12 # via virtualenv
flake8==3.5.0 # via -r requirements-dev.in
identify==1.4.14 # via pre-commit
idna==2.9 # via requests
invoke==1.4.1 # via -r requirements-dev.in
jeepney==0.4.3 # via keyring, secretstorage
keyring==21.2.0 # via twine
mccabe==0.6.1 # via flake8
nodeenv==1.3.3
nodeenv==1.3.5 # via -r requirements-dev.in, pre-commit
pkginfo==1.5.0.1 # via twine
pre-commit==1.17.0
pre-commit==2.4.0 # via -r requirements-dev.in
pycodestyle==2.3.1 # via flake8
pycparser==2.20 # via cffi
pyflakes==1.6.0 # via flake8
pygments==2.4.2 # via readme-renderer
pyyaml==5.1.1
readme-renderer==24.0 # via twine
pygments==2.6.1 # via readme-renderer
pyyaml==5.3.1 # via -r requirements-dev.in, pre-commit
readme-renderer==25.0 # via twine
requests-toolbelt==0.9.1 # via twine
requests==2.22.0 # via requests-toolbelt, twine
six==1.12.0 # via bleach, cfgv, pre-commit, readme-renderer
requests==2.23.0 # via requests-toolbelt, twine
secretstorage==3.1.2 # via keyring
six==1.14.0 # via bleach, cryptography, readme-renderer, virtualenv
toml==0.10.0 # via pre-commit
tqdm==4.32.2 # via twine
twine==1.13.0
urllib3==1.25.3 # via requests
virtualenv==16.6.1 # via pre-commit
tqdm==4.45.0 # via twine
twine==3.1.1 # via -r requirements-dev.in
urllib3==1.25.8 # via requests
virtualenv==20.0.17 # via pre-commit
webencodings==0.5.1 # via bleach
zipp==0.5.2 # via importlib-metadata

# The following packages are considered to be unsafe in a requirements file:
# setuptools==41.0.1 # via twine
# setuptools
@@ -4,109 +4,108 @@
#
# pip-compile --no-index --output-file=requirements-docs.txt requirements-docs.in
#
acme==0.36.0
acme==1.4.0 # via -r requirements.txt
alabaster==0.7.12 # via sphinx
alembic-autogenerate-enums==0.0.2
alembic==1.0.11
amqp==2.5.0
aniso8601==7.0.0
arrow==0.14.2
asn1crypto==0.24.0
asyncpool==1.0
babel==2.7.0 # via sphinx
bcrypt==3.1.7
billiard==3.6.0.0
blinker==1.4
boto3==1.9.187
botocore==1.12.187
celery[redis]==4.3.0
certifi==2019.6.16
certsrv==2.1.1
cffi==1.12.3
chardet==3.0.4
click==7.0
cloudflare==2.3.0
cryptography==2.7
dnspython3==1.15.0
dnspython==1.15.0
docutils==0.14
dyn==1.8.1
flask-bcrypt==0.7.1
flask-cors==3.0.8
flask-mail==0.9.1
flask-migrate==2.5.2
flask-principal==0.4.0
flask-replicated==1.3
flask-restful==0.3.7
flask-script==2.0.6
flask-sqlalchemy==2.4.0
flask==1.1.1
future==0.17.1
gunicorn==19.9.0
hvac==0.9.3
idna==2.8
imagesize==1.1.0 # via sphinx
inflection==0.3.1
itsdangerous==1.1.0
javaobj-py3==0.3.0
jinja2==2.10.1
jmespath==0.9.4
josepy==1.2.0
jsonlines==1.2.0
kombu==4.5.0
lockfile==0.12.2
logmatic-python==0.1.7
mako==1.0.13
markupsafe==1.1.1
marshmallow-sqlalchemy==0.17.0
marshmallow==2.19.5
mock==3.0.5
ndg-httpsclient==0.5.1
packaging==19.0 # via sphinx
paramiko==2.6.0
pem==19.1.0
psycopg2==2.8.3
pyasn1-modules==0.2.5
pyasn1==0.4.5
pycparser==2.19
pycryptodomex==3.8.2
pygments==2.4.2 # via sphinx
pyjks==19.0.0
pyjwt==1.7.1
pynacl==1.3.0
pyopenssl==19.0.0
pyparsing==2.4.0 # via packaging
pyrfc3339==1.1
python-dateutil==2.8.0
python-editor==1.0.4
python-json-logger==0.1.11
pytz==2019.1
pyyaml==5.1.1
raven[flask]==6.10.0
redis==3.2.1
requests-toolbelt==0.9.1
requests[security]==2.22.0
retrying==1.3.3
s3transfer==0.2.1
six==1.12.0
snowballstemmer==1.9.0 # via sphinx
sphinx-rtd-theme==0.4.3
sphinx==2.1.2
sphinxcontrib-applehelp==1.0.1 # via sphinx
sphinxcontrib-devhelp==1.0.1 # via sphinx
sphinxcontrib-htmlhelp==1.0.2 # via sphinx
sphinxcontrib-httpdomain==1.7.0
alembic-autogenerate-enums==0.0.2 # via -r requirements.txt
alembic==1.4.2 # via -r requirements.txt, flask-migrate
amqp==2.5.2 # via -r requirements.txt, kombu
aniso8601==8.0.0 # via -r requirements.txt, flask-restful
arrow==0.15.6 # via -r requirements.txt
asyncpool==1.0 # via -r requirements.txt
babel==2.8.0 # via sphinx
bcrypt==3.1.7 # via -r requirements.txt, flask-bcrypt, paramiko
billiard==3.6.3.0 # via -r requirements.txt, celery
blinker==1.4 # via -r requirements.txt, flask-mail, flask-principal, raven
boto3==1.13.11 # via -r requirements.txt
botocore==1.16.11 # via -r requirements.txt, boto3, s3transfer
celery[redis]==4.4.2 # via -r requirements.txt
certifi==2020.4.5.1 # via -r requirements.txt, requests
certsrv==2.1.1 # via -r requirements.txt
cffi==1.14.0 # via -r requirements.txt, bcrypt, cryptography, pynacl
chardet==3.0.4 # via -r requirements.txt, requests
click==7.1.1 # via -r requirements.txt, flask
cloudflare==2.7.1 # via -r requirements.txt
cryptography==2.9.2 # via -r requirements.txt, acme, josepy, paramiko, pyopenssl, requests
dnspython3==1.15.0 # via -r requirements.txt
dnspython==1.15.0 # via -r requirements.txt, dnspython3
docutils==0.15.2 # via -r requirements.txt, botocore, sphinx
dyn==1.8.1 # via -r requirements.txt
flask-bcrypt==0.7.1 # via -r requirements.txt
flask-cors==3.0.8 # via -r requirements.txt
flask-mail==0.9.1 # via -r requirements.txt
flask-migrate==2.5.3 # via -r requirements.txt
flask-principal==0.4.0 # via -r requirements.txt
flask-replicated==1.3 # via -r requirements.txt
flask-restful==0.3.8 # via -r requirements.txt
flask-script==2.0.6 # via -r requirements.txt
flask-sqlalchemy==2.4.1 # via -r requirements.txt, flask-migrate
flask==1.1.2 # via -r requirements.txt, flask-bcrypt, flask-cors, flask-mail, flask-migrate, flask-principal, flask-restful, flask-script, flask-sqlalchemy, raven
future==0.18.2 # via -r requirements.txt, cloudflare
gunicorn==20.0.4 # via -r requirements.txt
hvac==0.10.1 # via -r requirements.txt
idna==2.9 # via -r requirements.txt, requests
imagesize==1.2.0 # via sphinx
inflection==0.4.0 # via -r requirements.txt
itsdangerous==1.1.0 # via -r requirements.txt, flask
javaobj-py3==0.4.0.1 # via -r requirements.txt, pyjks
jinja2==2.11.2 # via -r requirements.txt, flask, sphinx
jmespath==0.9.5 # via -r requirements.txt, boto3, botocore
josepy==1.3.0 # via -r requirements.txt, acme
jsonlines==1.2.0 # via -r requirements.txt, cloudflare
kombu==4.6.8 # via -r requirements.txt, celery
lockfile==0.12.2 # via -r requirements.txt
logmatic-python==0.1.7 # via -r requirements.txt
mako==1.1.2 # via -r requirements.txt, alembic
markupsafe==1.1.1 # via -r requirements.txt, jinja2, mako
marshmallow-sqlalchemy==0.23.0 # via -r requirements.txt
marshmallow==2.20.4 # via -r requirements.txt, marshmallow-sqlalchemy
ndg-httpsclient==0.5.1 # via -r requirements.txt
packaging==20.3 # via sphinx
paramiko==2.7.1 # via -r requirements.txt
pem==20.1.0 # via -r requirements.txt
psycopg2==2.8.5 # via -r requirements.txt
pyasn1-modules==0.2.8 # via -r requirements.txt, pyjks, python-ldap
pyasn1==0.4.8 # via -r requirements.txt, ndg-httpsclient, pyasn1-modules, pyjks, python-ldap
pycparser==2.20 # via -r requirements.txt, cffi
pycryptodomex==3.9.7 # via -r requirements.txt, pyjks
pygments==2.6.1 # via sphinx
pyjks==20.0.0 # via -r requirements.txt
pyjwt==1.7.1 # via -r requirements.txt
pynacl==1.3.0 # via -r requirements.txt, paramiko
pyopenssl==19.1.0 # via -r requirements.txt, acme, josepy, ndg-httpsclient, requests
pyparsing==2.4.7 # via packaging
pyrfc3339==1.1 # via -r requirements.txt, acme
python-dateutil==2.8.1 # via -r requirements.txt, alembic, arrow, botocore
python-editor==1.0.4 # via -r requirements.txt, alembic
python-json-logger==0.1.11 # via -r requirements.txt, logmatic-python
python-ldap==3.2.0 # via -r requirements.txt
pytz==2019.3 # via -r requirements.txt, acme, babel, celery, flask-restful, pyrfc3339
pyyaml==5.3.1 # via -r requirements.txt, cloudflare
raven[flask]==6.10.0 # via -r requirements.txt
redis==3.5.2 # via -r requirements.txt, celery
requests-toolbelt==0.9.1 # via -r requirements.txt, acme
requests[security]==2.23.0 # via -r requirements.txt, acme, certsrv, cloudflare, hvac, requests-toolbelt, sphinx
retrying==1.3.3 # via -r requirements.txt
s3transfer==0.3.3 # via -r requirements.txt, boto3
six==1.14.0 # via -r requirements.txt, acme, bcrypt, cryptography, flask-cors, flask-restful, hvac, josepy, jsonlines, packaging, pynacl, pyopenssl, python-dateutil, retrying, sphinxcontrib-httpdomain, sqlalchemy-utils
snowballstemmer==2.0.0 # via sphinx
sphinx-rtd-theme==0.4.3 # via -r requirements-docs.in
sphinx==3.0.3 # via -r requirements-docs.in, sphinx-rtd-theme, sphinxcontrib-httpdomain
sphinxcontrib-applehelp==1.0.2 # via sphinx
sphinxcontrib-devhelp==1.0.2 # via sphinx
sphinxcontrib-htmlhelp==1.0.3 # via sphinx
sphinxcontrib-httpdomain==1.7.0 # via -r requirements-docs.in
sphinxcontrib-jsmath==1.0.1 # via sphinx
sphinxcontrib-qthelp==1.0.2 # via sphinx
sphinxcontrib-serializinghtml==1.1.3 # via sphinx
sqlalchemy-utils==0.34.0
sqlalchemy==1.3.5
tabulate==0.8.3
twofish==0.3.0
urllib3==1.25.3
vine==1.3.0
werkzeug==0.15.4
xmltodict==0.12.0
sphinxcontrib-qthelp==1.0.3 # via sphinx
sphinxcontrib-serializinghtml==1.1.4 # via sphinx
sqlalchemy-utils==0.36.5 # via -r requirements.txt
sqlalchemy==1.3.16 # via -r requirements.txt, alembic, flask-sqlalchemy, marshmallow-sqlalchemy, sqlalchemy-utils
tabulate==0.8.7 # via -r requirements.txt
twofish==0.3.0 # via -r requirements.txt, pyjks
urllib3==1.25.8 # via -r requirements.txt, botocore, requests
vine==1.3.0 # via -r requirements.txt, amqp, celery
werkzeug==1.0.1 # via -r requirements.txt, flask
xmltodict==0.12.0 # via -r requirements.txt

# The following packages are considered to be unsafe in a requirements file:
# setuptools==41.0.1 # via acme, josepy, sphinx
# setuptools
@@ -5,87 +5,88 @@
# pip-compile --no-index --output-file=requirements-tests.txt requirements-tests.in
#
appdirs==1.4.3 # via black
asn1crypto==0.24.0 # via cryptography
atomicwrites==1.3.0 # via pytest
attrs==19.1.0 # via black, jsonschema, pytest
aws-sam-translator==1.12.0 # via cfn-lint
aws-xray-sdk==2.4.2 # via moto
bandit==1.6.2
black==19.3b0
boto3==1.9.187 # via aws-sam-translator, moto
attrs==19.3.0 # via black, jsonschema, pytest
aws-sam-translator==1.22.0 # via cfn-lint
aws-xray-sdk==2.5.0 # via moto
bandit==1.6.2 # via -r requirements-tests.in
black==19.10b0 # via -r requirements-tests.in
boto3==1.13.11 # via aws-sam-translator, moto
boto==2.49.0 # via moto
botocore==1.12.187 # via aws-xray-sdk, boto3, moto, s3transfer
certifi==2019.6.16 # via requests
cffi==1.12.3 # via cryptography
cfn-lint==0.22.2 # via moto
botocore==1.16.11 # via aws-xray-sdk, boto3, moto, s3transfer
certifi==2020.4.5.1 # via requests
cffi==1.14.0 # via cryptography
cfn-lint==0.29.5 # via moto
chardet==3.0.4 # via requests
click==7.0 # via black, flask
coverage==4.5.3
cryptography==2.7 # via moto, sshpubkeys
datetime==4.3 # via moto
docker==4.0.2 # via moto
docutils==0.14 # via botocore
ecdsa==0.13.2 # via python-jose, sshpubkeys
factory-boy==2.12.0
faker==1.0.7
fakeredis==1.0.3
flask==1.1.1 # via pytest-flask
freezegun==0.3.12
future==0.17.1 # via aws-xray-sdk, python-jose
gitdb2==2.0.5 # via gitpython
gitpython==2.1.11 # via bandit
click==7.1.1 # via black, flask
coverage==5.1 # via -r requirements-tests.in
cryptography==2.9.2 # via moto, sshpubkeys
decorator==4.4.2 # via networkx
docker==4.2.0 # via moto
docutils==0.15.2 # via botocore
ecdsa==0.15 # via python-jose, sshpubkeys
factory-boy==2.12.0 # via -r requirements-tests.in
faker==4.1.0 # via -r requirements-tests.in, factory-boy
fakeredis==1.4.1 # via -r requirements-tests.in
flask==1.1.2 # via pytest-flask
freezegun==0.3.15 # via -r requirements-tests.in
future==0.18.2 # via aws-xray-sdk
gitdb==4.0.4 # via gitpython
gitpython==3.1.1 # via bandit
idna==2.8 # via moto, requests
importlib-metadata==0.18 # via pluggy, pytest
importlib-metadata==1.6.0 # via jsonpickle
itsdangerous==1.1.0 # via flask
jinja2==2.10.1 # via flask, moto
jmespath==0.9.4 # via boto3, botocore
jinja2==2.11.2 # via flask, moto
jmespath==0.9.5 # via boto3, botocore
jsondiff==1.1.2 # via moto
jsonpatch==1.23 # via cfn-lint
jsonpickle==1.2 # via aws-xray-sdk
jsonpatch==1.25 # via cfn-lint
jsonpickle==1.4 # via aws-xray-sdk
jsonpointer==2.0 # via jsonpatch
jsonschema==3.0.1 # via aws-sam-translator, cfn-lint
jsonschema==3.2.0 # via aws-sam-translator, cfn-lint
markupsafe==1.1.1 # via jinja2
mock==3.0.5 # via moto
more-itertools==7.1.0 # via pytest
moto==1.3.11
nose==1.3.7
packaging==19.0 # via pytest
pbr==5.4.0 # via stevedore
pluggy==0.12.0 # via pytest
py==1.8.0 # via pytest
pyasn1==0.4.5 # via rsa
pycparser==2.19 # via cffi
pyflakes==2.1.1
pyparsing==2.4.0 # via packaging
pyrsistent==0.15.3 # via jsonschema
pytest-flask==0.15.0
pytest-mock==1.10.4
pytest==5.0.1
python-dateutil==2.8.0 # via botocore, faker, freezegun, moto
python-jose==3.0.1 # via moto
pytz==2019.1 # via datetime, moto
pyyaml==5.1.1
redis==3.2.1 # via fakeredis
requests-mock==1.6.0
requests==2.22.0 # via cfn-lint, docker, moto, requests-mock, responses
responses==0.10.6 # via moto
mock==4.0.2 # via moto
more-itertools==8.2.0 # via pytest
moto==1.3.14 # via -r requirements-tests.in
networkx==2.4 # via cfn-lint
nose==1.3.7 # via -r requirements-tests.in
packaging==20.3 # via pytest
pathspec==0.8.0 # via black
pbr==5.4.5 # via stevedore
pluggy==0.13.1 # via pytest
py==1.8.1 # via pytest
pyasn1==0.4.8 # via python-jose, rsa
pycparser==2.20 # via cffi
pyflakes==2.2.0 # via -r requirements-tests.in
pyparsing==2.4.7 # via packaging
pyrsistent==0.16.0 # via jsonschema
pytest-flask==1.0.0 # via -r requirements-tests.in
pytest-mock==3.1.0 # via -r requirements-tests.in
pytest==5.4.2 # via -r requirements-tests.in, pytest-flask, pytest-mock
python-dateutil==2.8.1 # via botocore, faker, freezegun, moto
python-jose==3.1.0 # via moto
pytz==2019.3 # via moto
pyyaml==5.3.1 # via -r requirements-tests.in, bandit, cfn-lint, moto
redis==3.5.2 # via fakeredis
regex==2020.4.4 # via black
requests-mock==1.8.0 # via -r requirements-tests.in
requests==2.23.0 # via docker, moto, requests-mock, responses
responses==0.10.12 # via moto
rsa==4.0 # via python-jose
s3transfer==0.2.1 # via boto3
six==1.12.0 # via aws-sam-translator, bandit, cfn-lint, cryptography, docker, faker, freezegun, jsonschema, mock, moto, packaging, pyrsistent, python-dateutil, python-jose, requests-mock, responses, stevedore, websocket-client
smmap2==2.0.5 # via gitdb2
sshpubkeys==3.1.0 # via moto
s3transfer==0.3.3 # via boto3
six==1.14.0 # via aws-sam-translator, bandit, cfn-lint, cryptography, docker, ecdsa, fakeredis, freezegun, jsonschema, moto, packaging, pyrsistent, python-dateutil, python-jose, requests-mock, responses, stevedore, websocket-client
smmap==3.0.2 # via gitdb
sortedcontainers==2.1.0 # via fakeredis
stevedore==1.30.1 # via bandit
text-unidecode==1.2 # via faker
sshpubkeys==3.1.0 # via moto
stevedore==1.32.0 # via bandit
text-unidecode==1.3 # via faker
toml==0.10.0 # via black
urllib3==1.25.3 # via botocore, requests
wcwidth==0.1.7 # via pytest
websocket-client==0.56.0 # via docker
werkzeug==0.15.4 # via flask, moto, pytest-flask
wrapt==1.11.2 # via aws-xray-sdk
typed-ast==1.4.1 # via black
urllib3==1.25.8 # via botocore, requests
wcwidth==0.1.9 # via pytest
websocket-client==0.57.0 # via docker
werkzeug==1.0.1 # via flask, moto, pytest-flask
wrapt==1.12.1 # via aws-xray-sdk
xmltodict==0.12.0 # via moto
zipp==0.5.2 # via importlib-metadata
zope.interface==4.6.0 # via datetime
zipp==3.1.0 # via importlib-metadata

# The following packages are considered to be unsafe in a requirements file:
# setuptools==41.0.1 # via cfn-lint, jsonschema, zope.interface
# setuptools
@@ -28,11 +28,10 @@ gunicorn
hvac # required for the vault destination plugin
inflection
jinja2
kombu<4.6.0 # Bug with inspecting active tasks: https://github.com/celery/kombu/issues/1051
lockfile
logmatic-python
marshmallow-sqlalchemy
marshmallow
marshmallow<2.20.5 # schema duplicate issues https://github.com/marshmallow-code/marshmallow-sqlalchemy/issues/121
ndg-httpsclient
paramiko # required for the SFTP destination plugin
pem
148
requirements.txt
148
requirements.txt
|
@ -4,94 +4,92 @@
|
|||
#
|
||||
# pip-compile --no-index --output-file=requirements.txt requirements.in
|
||||
#
|
||||
acme==0.36.0
|
||||
alembic-autogenerate-enums==0.0.2
|
||||
alembic==1.0.11 # via flask-migrate
|
||||
amqp==2.5.0 # via kombu
|
||||
aniso8601==7.0.0 # via flask-restful
|
||||
arrow==0.14.2
|
||||
asn1crypto==0.24.0 # via cryptography
|
||||
asyncpool==1.0
|
||||
acme==1.4.0 # via -r requirements.in
|
||||
alembic-autogenerate-enums==0.0.2 # via -r requirements.in
|
||||
alembic==1.4.2 # via flask-migrate
|
||||
amqp==2.5.2 # via kombu
|
||||
aniso8601==8.0.0 # via flask-restful
|
||||
arrow==0.15.6 # via -r requirements.in
|
||||
asyncpool==1.0 # via -r requirements.in
|
||||
bcrypt==3.1.7 # via flask-bcrypt, paramiko
|
||||
billiard==3.6.0.0 # via celery
 billiard==3.6.3.0  # via celery
 blinker==1.4  # via flask-mail, flask-principal, raven
-boto3==1.9.187
-botocore==1.12.187
-celery[redis]==4.3.0
-certifi==2019.6.16
-certsrv==2.1.1
-cffi==1.12.3  # via bcrypt, cryptography, pynacl
+boto3==1.13.11  # via -r requirements.in
+botocore==1.16.11  # via -r requirements.in, boto3, s3transfer
+celery[redis]==4.4.2  # via -r requirements.in
+certifi==2020.4.5.1  # via -r requirements.in, requests
+certsrv==2.1.1  # via -r requirements.in
+cffi==1.14.0  # via bcrypt, cryptography, pynacl
 chardet==3.0.4  # via requests
-click==7.0  # via flask
-cloudflare==2.3.0
-cryptography==2.7
-dnspython3==1.15.0
+click==7.1.1  # via flask
+cloudflare==2.7.1  # via -r requirements.in
+cryptography==2.9.2  # via -r requirements.in, acme, josepy, paramiko, pyopenssl, requests
+dnspython3==1.15.0  # via -r requirements.in
 dnspython==1.15.0  # via dnspython3
-docutils==0.14  # via botocore
-dyn==1.8.1
-flask-bcrypt==0.7.1
-flask-cors==3.0.8
-flask-mail==0.9.1
-flask-migrate==2.5.2
-flask-principal==0.4.0
-flask-replicated==1.3
-flask-restful==0.3.7
-flask-script==2.0.6
-flask-sqlalchemy==2.4.0
-flask==1.1.1
-future==0.17.1
-gunicorn==19.9.0
-hvac==0.9.3
-idna==2.8  # via requests
-inflection==0.3.1
+docutils==0.15.2  # via botocore
+dyn==1.8.1  # via -r requirements.in
+flask-bcrypt==0.7.1  # via -r requirements.in
+flask-cors==3.0.8  # via -r requirements.in
+flask-mail==0.9.1  # via -r requirements.in
+flask-migrate==2.5.3  # via -r requirements.in
+flask-principal==0.4.0  # via -r requirements.in
+flask-replicated==1.3  # via -r requirements.in
+flask-restful==0.3.8  # via -r requirements.in
+flask-script==2.0.6  # via -r requirements.in
+flask-sqlalchemy==2.4.1  # via -r requirements.in, flask-migrate
+flask==1.1.2  # via -r requirements.in, flask-bcrypt, flask-cors, flask-mail, flask-migrate, flask-principal, flask-restful, flask-script, flask-sqlalchemy, raven
+future==0.18.2  # via -r requirements.in, cloudflare
+gunicorn==20.0.4  # via -r requirements.in
+hvac==0.10.1  # via -r requirements.in
+idna==2.9  # via requests
+inflection==0.4.0  # via -r requirements.in
 itsdangerous==1.1.0  # via flask
-javaobj-py3==0.3.0  # via pyjks
-jinja2==2.10.1
-jmespath==0.9.4  # via boto3, botocore
-josepy==1.2.0  # via acme
+javaobj-py3==0.4.0.1  # via pyjks
+jinja2==2.11.2  # via -r requirements.in, flask
+jmespath==0.9.5  # via boto3, botocore
+josepy==1.3.0  # via acme
 jsonlines==1.2.0  # via cloudflare
-kombu==4.5.0
-lockfile==0.12.2
-logmatic-python==0.1.7
-mako==1.0.13  # via alembic
+kombu==4.6.8  # via celery
+lockfile==0.12.2  # via -r requirements.in
+logmatic-python==0.1.7  # via -r requirements.in
+mako==1.1.2  # via alembic
 markupsafe==1.1.1  # via jinja2, mako
-marshmallow-sqlalchemy==0.17.0
-marshmallow==2.19.5
-mock==3.0.5  # via acme
-ndg-httpsclient==0.5.1
-paramiko==2.6.0
-pem==19.1.0
-psycopg2==2.8.3
-pyasn1-modules==0.2.5  # via pyjks, python-ldap
-pyasn1==0.4.5  # via ndg-httpsclient, pyasn1-modules, pyjks, python-ldap
-pycparser==2.19  # via cffi
-pycryptodomex==3.8.2  # via pyjks
-pyjks==19.0.0
-pyjwt==1.7.1
+marshmallow-sqlalchemy==0.23.0  # via -r requirements.in
+marshmallow==2.20.4  # via -r requirements.in, marshmallow-sqlalchemy
+ndg-httpsclient==0.5.1  # via -r requirements.in
+paramiko==2.7.1  # via -r requirements.in
+pem==20.1.0  # via -r requirements.in
+psycopg2==2.8.5  # via -r requirements.in
+pyasn1-modules==0.2.8  # via pyjks, python-ldap
+pyasn1==0.4.8  # via ndg-httpsclient, pyasn1-modules, pyjks, python-ldap
+pycparser==2.20  # via cffi
+pycryptodomex==3.9.7  # via pyjks
+pyjks==20.0.0  # via -r requirements.in
+pyjwt==1.7.1  # via -r requirements.in
 pynacl==1.3.0  # via paramiko
-pyopenssl==19.0.0
+pyopenssl==19.1.0  # via -r requirements.in, acme, josepy, ndg-httpsclient, requests
 pyrfc3339==1.1  # via acme
-python-dateutil==2.8.0  # via alembic, arrow, botocore
+python-dateutil==2.8.1  # via alembic, arrow, botocore
 python-editor==1.0.4  # via alembic
 python-json-logger==0.1.11  # via logmatic-python
-python-ldap==3.2.0
-pytz==2019.1  # via acme, celery, flask-restful, pyrfc3339
-pyyaml==5.1.1
-raven[flask]==6.10.0
-redis==3.2.1
+python-ldap==3.2.0  # via -r requirements.in
+pytz==2019.3  # via acme, celery, flask-restful, pyrfc3339
+pyyaml==5.3.1  # via -r requirements.in, cloudflare
+raven[flask]==6.10.0  # via -r requirements.in
+redis==3.5.2  # via -r requirements.in, celery
 requests-toolbelt==0.9.1  # via acme
-requests[security]==2.22.0
-retrying==1.3.3
-s3transfer==0.2.1  # via boto3
-six==1.12.0
-sqlalchemy-utils==0.34.0
-sqlalchemy==1.3.5  # via alembic, flask-sqlalchemy, marshmallow-sqlalchemy, sqlalchemy-utils
-tabulate==0.8.3
+requests[security]==2.23.0  # via -r requirements.in, acme, certsrv, cloudflare, hvac, requests-toolbelt
+retrying==1.3.3  # via -r requirements.in
+s3transfer==0.3.3  # via boto3
+six==1.14.0  # via -r requirements.in, acme, bcrypt, cryptography, flask-cors, flask-restful, hvac, josepy, jsonlines, pynacl, pyopenssl, python-dateutil, retrying, sqlalchemy-utils
+sqlalchemy-utils==0.36.5  # via -r requirements.in
+sqlalchemy==1.3.16  # via alembic, flask-sqlalchemy, marshmallow-sqlalchemy, sqlalchemy-utils
+tabulate==0.8.7  # via -r requirements.in
 twofish==0.3.0  # via pyjks
-urllib3==1.25.3  # via botocore, requests
+urllib3==1.25.8  # via botocore, requests
 vine==1.3.0  # via amqp, celery
-werkzeug==0.15.4  # via flask
-xmltodict==0.12.0
+werkzeug==1.0.1  # via flask
+xmltodict==0.12.0  # via -r requirements.in

 # The following packages are considered to be unsafe in a requirements file:
-# setuptools==41.0.1  # via acme, josepy
+# setuptools
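The `# via -r requirements.in` annotations on the updated pins are characteristic of a pip-compile-generated lockfile rather than a hand-edited one. A minimal sketch for sanity-checking a deployed environment against such pins follows; the file name "requirements.txt" and the use of setuptools' pkg_resources API are my assumptions, not part of this change:

# check_pins.py -- a sketch, not part of this diff.
# Compares installed package versions against pins in a pip-compile-style file.
import pkg_resources

def check_pins(path="requirements.txt"):  # file name is an assumption
    mismatches = []
    with open(path) as f:
        for line in f:
            line = line.split("#", 1)[0].strip()  # drop "# via ..." annotations
            if "==" not in line:
                continue
            name, pinned = line.split("==", 1)
            name = name.split("[", 1)[0]  # strip extras: celery[redis] -> celery
            try:
                installed = pkg_resources.get_distribution(name).version
            except pkg_resources.DistributionNotFound:
                mismatches.append((name, pinned, "not installed"))
                continue
            if installed != pinned:
                mismatches.append((name, pinned, installed))
    return mismatches

for name, want, have in check_pins():
    print("%s: pinned %s, found %s" % (name, want, have))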
setup.py

@@ -23,7 +23,11 @@ from setuptools import setup, find_packages
 from subprocess import check_output

 import pip
-if tuple(map(int, pip.__version__.split('.'))) >= (10, 0, 0):
+if tuple(map(int, pip.__version__.split('.'))) >= (19, 3, 0):
+    from pip._internal.network.session import PipSession
+    from pip._internal.req import parse_requirements
+
+elif tuple(map(int, pip.__version__.split('.'))) >= (10, 0, 0):
     from pip._internal.download import PipSession
     from pip._internal.req import parse_requirements
 else:
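Pieced together, the version gate above reads as follows. The body of the final else branch lies outside the visible hunk; the pre-pip-10 import paths shown there are an assumption, based on pip only having moved these helpers under pip._internal in 10.0:

import pip

PIP_VERSION = tuple(map(int, pip.__version__.split('.')))

if PIP_VERSION >= (19, 3, 0):
    # pip 19.3 moved PipSession from pip._internal.download
    # to pip._internal.network.session
    from pip._internal.network.session import PipSession
    from pip._internal.req import parse_requirements
elif PIP_VERSION >= (10, 0, 0):
    # pip 10 made these helpers internal (pip._internal.*)
    from pip._internal.download import PipSession
    from pip._internal.req import parse_requirements
else:
    # pre-10 layout (assumed; not visible in the hunk)
    from pip.download import PipSession
    from pip.req import parse_requirements

Tuple comparison makes (20, 1) >= (19, 3, 0) evaluate true as intended; note the int() conversion assumes a plain numeric version string, which holds for released pip versions but would fail on pre-releases like "20.1b1".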
@@ -41,15 +45,19 @@ with open(os.path.join(ROOT, 'lemur', '__about__.py')) as f:
     exec(f.read(), about)  # nosec: about file is benign

 install_requires_g = parse_requirements("requirements.txt", session=PipSession())
-install_requires = [str(ir.req) for ir in install_requires_g]

 tests_require_g = parse_requirements("requirements-tests.txt", session=PipSession())
-tests_require = [str(ir.req) for ir in tests_require_g]

 docs_require_g = parse_requirements("requirements-docs.txt", session=PipSession())
-docs_require = [str(ir.req) for ir in docs_require_g]

 dev_requires_g = parse_requirements("requirements-dev.txt", session=PipSession())

+if tuple(map(int, pip.__version__.split('.'))) >= (20, 1):
+    install_requires = [str(ir.requirement) for ir in install_requires_g]
+    tests_require = [str(ir.requirement) for ir in tests_require_g]
+    docs_require = [str(ir.requirement) for ir in docs_require_g]
+    dev_requires = [str(ir.requirement) for ir in dev_requires_g]
+else:
+    install_requires = [str(ir.req) for ir in install_requires_g]
+    tests_require = [str(ir.req) for ir in tests_require_g]
+    docs_require = [str(ir.req) for ir in docs_require_g]
+    dev_requires = [str(ir.req) for ir in dev_requires_g]
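This hunk exists because pip 20.1 changed what parse_requirements yields: it now returns ParsedRequirement objects whose requirement string lives in .requirement, while older pip returned InstallRequirement objects with a .req attribute. The repetition in the added if/else could be folded into a helper; a sketch reusing the PIP_VERSION tuple from the previous snippet, not what the diff actually ships:

def reqs_from(path):
    # Return requirement strings from a pinned requirements file.
    parsed = parse_requirements(path, session=PipSession())
    if PIP_VERSION >= (20, 1):
        # pip >= 20.1: ParsedRequirement.requirement is already a string
        return [str(ir.requirement) for ir in parsed]
    # older pip: InstallRequirement.req is a Requirement object
    return [str(ir.req) for ir in parsed]

install_requires = reqs_from("requirements.txt")
tests_require = reqs_from("requirements-tests.txt")
docs_require = reqs_from("requirements-docs.txt")
dev_requires = reqs_from("requirements-dev.txt")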
@@ -147,6 +155,7 @@ setup(
             'java_keystore_export = lemur.plugins.lemur_jks.plugin:JavaKeystoreExportPlugin',
             'openssl_export = lemur.plugins.lemur_openssl.plugin:OpenSSLExportPlugin',
             'atlas_metric = lemur.plugins.lemur_atlas.plugin:AtlasMetricPlugin',
+            'atlas_metric_redis = lemur.plugins.lemur_atlas_redis.plugin:AtlasMetricRedisPlugin',
             'kubernetes_destination = lemur.plugins.lemur_kubernetes.plugin:KubernetesDestinationPlugin',
             'cryptography_issuer = lemur.plugins.lemur_cryptography.plugin:CryptographyIssuerPlugin',
             'cfssl_issuer = lemur.plugins.lemur_cfssl.plugin:CfsslIssuerPlugin',
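The one-line addition registers the new Atlas-to-Redis metric plugin as a setuptools entry point alongside the existing ones. Plugins registered this way are discoverable at runtime by iterating the entry-point group; a sketch of that pattern, where the group name 'lemur.plugins' is inferred from the package paths rather than visible in this hunk:

import pkg_resources

# Iterate every plugin registered under the (assumed) 'lemur.plugins' group
# and import the class each entry point names, e.g. AtlasMetricRedisPlugin.
for ep in pkg_resources.iter_entry_points('lemur.plugins'):
    try:
        plugin_cls = ep.load()
        print("loaded %s -> %s" % (ep.name, plugin_cls.__name__))
    except ImportError as exc:
        print("failed to load %s: %s" % (ep.name, exc))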