
Deploying a FastAPI project using Lambda, Docker, Aurora, CDK & GitHub Actions

2022-09-04   |   Tutorial

A while back I started playing around with Python's FastAPI, an easy-to-use, extremely performant API framework. I eventually had a project that was a good use case for a FastAPI backend. Building the API went smoothly, but I ran into some trouble figuring out how to deploy it to AWS.

To start things off, let's set up the Dockerfile for the project (Dockerfile.prod in your API root):

# AWS-provided Lambda base image for Python 3.9
FROM public.ecr.aws/lambda/python:3.9

COPY ./requirements.txt ./requirements.txt

# Make sure pip is available, then install dependencies
RUN python3 -m ensurepip
RUN pip install -r ./requirements.txt

# Scratch directory for generated export files
RUN mkdir tmp_export_files

# Copy the application code into the Lambda task root
COPY ./app ${LAMBDA_TASK_ROOT}/app
COPY ./app/main.py ${LAMBDA_TASK_ROOT}
COPY ./app/__init__.py ${LAMBDA_TASK_ROOT}

# Lambda invokes the `handler` object defined in main.py (the Mangum wrapper below)
CMD ["main.handler"]

Note: the Dockerfile above assumes you are using a requirements.txt file to manage dependencies.
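
For reference, a minimal requirements.txt for this setup might look like the sketch below. The exact package list is an assumption based on the imports used in this tutorial (FastAPI, Mangum, the databases library, and Alembic for migrations); pin versions as appropriate:

fastapi
mangum
sqlalchemy
databases[postgresql]
alembic
psycopg2-binary  # sync driver commonly used by alembic migrations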


Your project's main.py file should look something like this:

from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from mangum import Mangum
from app.core.router import api_router
from app.core.database import database

app = FastAPI(title="App Name")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(api_router)


@app.on_event("startup")
async def startup():
    if not database.is_connected:
        await database.connect()


@app.on_event("shutdown")
async def shutdown():
    if database.is_connected:
        await database.disconnect()


# Mangum wraps the ASGI app so API Gateway/Lambda events can drive it;
# this is the "main.handler" referenced in the Dockerfile's CMD.
handler = Mangum(app)
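
main.py imports a database object from app.core.database, which isn't shown in the original post. Below is a minimal sketch of that module, assuming the databases library and that the connection string arrives via the DATABASE_URL environment variable (the CDK stack later in this post sets it on the Lambda):

import os

from databases import Database

# Reads the connection string injected into the Lambda environment by CDK
database = Database(os.environ["DATABASE_URL"])

For local development you can still run the same app directly with uvicorn (uvicorn app.main:app --reload, assuming the app/ layout used in the Dockerfile); Mangum only comes into play inside Lambda.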



This tutorial assumes you have a basic knowledge of CDK and AWS services, so I won't go into full detail on setting that up, but here is the stack I've used. You will need to adjust it to match the services you use.

import * as cdk from "@aws-cdk/core";
import * as cognito from "@aws-cdk/aws-cognito";
import * as apigateway from "@aws-cdk/aws-apigateway";
import * as lambda from "@aws-cdk/aws-lambda";
import * as ec2 from "@aws-cdk/aws-ec2";
import * as s3 from "@aws-cdk/aws-s3";
import * as iam from "@aws-cdk/aws-iam";
import * as rds from "@aws-cdk/aws-rds";
import * as secretsManager from "@aws-cdk/aws-secretsmanager";
import * as ecr from "@aws-cdk/aws-ecr";

import {
  AuthorizationType,
  CfnAuthorizer,
} from "@aws-cdk/aws-apigateway";
import { Duration } from "@aws-cdk/core";

export class InfrastructureStack extends cdk.Stack {
  constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
    super(scope, id, props);

    const buildResourceId = (suffix: string) => `${id}-${suffix}`;

    const vpc = new ec2.Vpc(this, buildResourceId("vpc"), {
      natGateways: 1,
    });

    const dbSubnetGroup = new rds.CfnDBSubnetGroup(
      this,
      buildResourceId("db-subnet"),
      {
        dbSubnetGroupDescription: "Subnet group to access database",
        dbSubnetGroupName: buildResourceId("db-subnet"),
        subnetIds: vpc.privateSubnets.map((s) => s.subnetId),
      }
    );

    const dbSecurityGroup = new ec2.SecurityGroup(
      this,
      buildResourceId("db-security-group"),
      {
        vpc,
        allowAllOutbound: false,
      }
    );

    const dbCredentialsSecret = new rds.DatabaseSecret(
      this,
      buildResourceId("db-secret"),
      {
        username: "dbadminuser",
      }
    );

    const dbCluster = new rds.CfnDBCluster(this, buildResourceId("db"), {
      dbClusterIdentifier: buildResourceId("db"),
      dbSubnetGroupName: dbSubnetGroup.dbSubnetGroupName,
      vpcSecurityGroupIds: [dbSecurityGroup.securityGroupId],
      engineMode: "serverless",
      engine: "aurora-postgresql",
      engineVersion: "10.12",
      databaseName: "dbname",
      masterUsername: dbCredentialsSecret
        .secretValueFromJson("username")
        .toString(),
      masterUserPassword: dbCredentialsSecret
        .secretValueFromJson("password")
        .toString(),
      backupRetentionPeriod: 1,
      scalingConfiguration: {
        autoPause: false,
        minCapacity: 2,
        maxCapacity: 2,
      },
      deletionProtection: true,
      enableHttpEndpoint: true,
    });

    dbCluster.addDependsOn(dbSubnetGroup);

    dbCredentialsSecret.attach({
      asSecretAttachmentTarget: () => ({
        targetId: dbCluster.ref,
        targetType: secretsManager.AttachmentTargetType.RDS_DB_CLUSTER,
      }),
    });

    const dbPort = ec2.Port.tcp(cdk.Token.asNumber(dbCluster.attrEndpointPort));

    const dbConnections = new ec2.Connections({
      securityGroups: [dbSecurityGroup],
      defaultPort: dbPort,
    });

    // NOTE: this opens SSH to the world; tighten or remove this rule for production
    dbSecurityGroup.addIngressRule(
      ec2.Peer.anyIpv4(),
      ec2.Port.tcp(22),
      "allow SSH access from anywhere"
    );

    const userPool = new cognito.UserPool(this, buildResourceId("pool"), {
      userPoolName: buildResourceId("pool"),
      selfSignUpEnabled: true,
      accountRecovery: cognito.AccountRecovery.EMAIL_ONLY,
      userVerification: {
        emailSubject: "Verify your email address to use app",
        emailStyle: cognito.VerificationEmailStyle.CODE,
      },
      autoVerify: {
        email: true,
      },
      signInAliases: {
        email: true,
      },
      standardAttributes: {
        fullname: {
          required: true,
          mutable: false,
        },
        email: {
          required: true,
          mutable: true,
        },
      },
      signInCaseSensitive: false,
      removalPolicy: cdk.RemovalPolicy.DESTROY,
    });

    const userPoolClient = new cognito.UserPoolClient(
      this,
      buildResourceId("pool-client"),
      {
        userPool,
      }
    );

    const ecrRepository = ecr.Repository.fromRepositoryName(
      this,
      buildResourceId("ecr-repository"),
      buildResourceId("ecr-repository")
    );

    const httpLambda2Code = lambda.Code.fromEcrImage(ecrRepository, {
      tag: "latest",
      cmd: ["main.handler"],
    });

    const bucket = new s3.Bucket(this, buildResourceId("image-store"), {
      bucketName: buildResourceId("image-store"),
      removalPolicy: cdk.RemovalPolicy.DESTROY,
      autoDeleteObjects: true,
    });

    const lambdaEnvironment = {
      DATABASE_URL: `postgresql://${dbCluster.masterUsername as string}:${
        dbCluster.masterUserPassword as string
      }@${dbCluster.attrEndpointAddress}:${dbCluster.attrEndpointPort}/${
        dbCluster.databaseName as string
      }`,
      COGNITO_REGION: "eu-west-1",
      COGNITO_USERPOOLID: userPool.userPoolId,
      COGNITO_APPCLIENTID: userPoolClient.userPoolClientId,
      S3_BUCKET: bucket.bucketName,
      TMP_EXPORT_PATH: "/tmp",
      APP_ENV: "PROD",
    };

    const httpLambda2 = new lambda.Function(
      this,
      buildResourceId("lambda-http"),
      {
        functionName: buildResourceId("lambda-http"),
        memorySize: 512,
        timeout: Duration.seconds(8),
        handler: lambda.Handler.FROM_IMAGE,
        runtime: lambda.Runtime.FROM_IMAGE,
        code: httpLambda2Code,
        environment: lambdaEnvironment,
        vpc,
      }
    );

    bucket.grantReadWrite(httpLambda2);

    httpLambda2.connections.allowTo(
      {
        connections: dbConnections,
      },
      dbPort
    );

    httpLambda2.addToRolePolicy(
      new iam.PolicyStatement({
        // NOTE: "*" grants all actions on the user pool; consider scoping this down
        actions: ["*"],
        resources: [userPool.userPoolArn],
      })
    );

    const migrationLambdaCode = lambda.Code.fromEcrImage(ecrRepository, {
      tag: "latest",
      cmd: ["alembic.config.main"],
    });

    const migrationLambda = new lambda.Function(
      this,
      buildResourceId("lambda-migration"),
      {
        functionName: buildResourceId("lambda-migration"),
        vpc,
        code: migrationLambdaCode,
        handler: lambda.Handler.FROM_IMAGE,
        runtime: lambda.Runtime.FROM_IMAGE,
        timeout: cdk.Duration.seconds(600),
        tracing: lambda.Tracing.ACTIVE,
        environment: lambdaEnvironment,
      }
    );

    migrationLambda.connections.allowTo(
      {
        connections: dbConnections,
      },
      dbPort
    );

    const apiGateway = new apigateway.LambdaRestApi(
      this,
      buildResourceId("api-gateway"),
      {
        handler: httpLambda2,
      }
    );

    new CfnAuthorizer(this, "APIGatewayAuthorizer", {
      name: buildResourceId("api-authorizer"),
      restApiId: apiGateway.restApiId,
      identitySource: "method.request.header.Authorization",
      providerArns: [userPool.userPoolArn],
      type: AuthorizationType.COGNITO,
    });

    new cdk.CfnOutput(this, "UserPoolId", {
      value: userPool.userPoolId,
    });

    new cdk.CfnOutput(this, "UserPoolClientId", {
      value: userPoolClient.userPoolClientId,
    });

    new cdk.CfnOutput(this, "ApiEndpoint", {
      value: apiGateway.url,
    });
  }
}

As you can see, I have set up two Lambda functions in that stack: one for the API and one for running migrations. Also take note of the CMDs I've used for each.
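
Because the migration Lambda's image CMD is alembic.config.main, the JSON array you pass as the invocation payload becomes Alembic's command-line arguments. Once everything is deployed, you can also run migrations by hand with the AWS CLI; the function name below is a placeholder matching what buildResourceId("lambda-migration") produces in the stack:

aws lambda invoke \
  --function-name my-stack-lambda-migration \
  --cli-binary-format raw-in-base64-out \
  --payload '["--config=/var/task/app/alembic.ini", "upgrade", "head"]' \
  result.json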

Now you will have to create an ECR repository and push an image to it. If you try to deploy the stack before doing this, you will get an error, because the Lambdas will try to pull their code from an ECR repo that doesn't exist yet.
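
A sketch of that bootstrap step using the AWS CLI; the repository name, region, and account ID below are placeholders you'll need to swap for your own values (the repo name must match what buildResourceId("ecr-repository") produces):

# Create the repository the stack expects to find
aws ecr create-repository --repository-name my-stack-ecr-repository --region eu-west-1

# Authenticate Docker against your ECR registry
aws ecr get-login-password --region eu-west-1 | \
  docker login --username AWS --password-stdin 123456789012.dkr.ecr.eu-west-1.amazonaws.com

# Build and push the image the Lambdas will pull
docker build -t 123456789012.dkr.ecr.eu-west-1.amazonaws.com/my-stack-ecr-repository:latest -f Dockerfile.prod .
docker push 123456789012.dkr.ecr.eu-west-1.amazonaws.com/my-stack-ecr-repository:latest

# With the image in place, the stack can be deployed
cdk deploy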

Once you've deployed your stack to AWS, you can move on to setting up your GitHub Actions workflow. An example follows.

.github/workflows/onPushToMain.yml

name: Deploy development changes on push to main branch

on:
  push:
    branches: [main]
env:
  AWS_ECR_REPO: REPLACE_WITH_ECR_REPO_URL:latest
  MIGRATION_LAMBDA_NAME: REPLACE_WITH_MIGRATION_LAMBDA_NAME
  HTTP_LAMBDA_NAME: REPLACE_WITH_HTTP_LAMBDA_NAME

jobs:
  build-container-and-push-to-registry:
    name: "Build container and push to registry"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: "Build container image"
        run: docker build -t ${{ env.AWS_ECS_REPO }} . -f Dockerfile.prod
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
      - name: "Push image to ECR"
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: REPLACE_WITH_ECS_REPO_NAME
          IMAGE_TAG: latest
        run: docker push ${{ env.AWS_ECS_REPO }}
  update-migration-lambda-and-run:
    name: "Update migration lambda and run migrations"
    runs-on: ubuntu-latest
    needs: build-container-and-push-to-registry
    steps:
      - name: "Configure AWS Credentials"
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
      - uses: actions/checkout@v2
      - name: "Update migration lambda code"
        run: |
          aws lambda update-function-code --function-name ${{ env.MIGRATION_LAMBDA_NAME }} --image-uri ${{ env.AWS_ECR_REPO }} --publish
      - name: "Wait 30 seconds"
        run: |
          sleep 30
      - name: "Run migrations"
        run: |
          aws lambda invoke --function-name ${{ env.MIGRATION_LAMBDA_NAME }} --cli-binary-format raw-in-base64-out --payload '["--config=/var/task/app/alembic.ini", "upgrade", "head"]' result.json
      - name: "Display migrations result"
        run: cat result.json
  update-http-api-lambda:
    name: "Update HTTP API lambda"
    runs-on: ubuntu-latest
    needs: build-container-and-push-to-registry
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
      - name: "Update lambda"
        run: |
          aws lambda update-function-code --function-name ${{ env.HTTP_LAMBDA_NAME }} --image-uri ${{ env.AWS_ECR_REPO }} --publish

For the above config to work you will need to set the following secrets in GitHub:

AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_DEFAULT_REGION

You will also need to update the env variables at the top of the file.
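
If you use the GitHub CLI, one way to set those secrets (the values shown are placeholders):

gh secret set AWS_ACCESS_KEY_ID --body "AKIA..."
gh secret set AWS_SECRET_ACCESS_KEY --body "your-secret-key"
gh secret set AWS_DEFAULT_REGION --body "eu-west-1"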

Feel free to reach out if you find any issues with the above tutorial. Hopefully this has helped you with deploying a FastAPI project using Lambda, Docker, Aurora, CDK & GitHub Actions.

Thanks for reading. If you like my articles and tutorials, feel free to share them, and drop me an email with suggestions for new topics, or even just for a chat about your next development project.
