refactor(react-rust-postgres): replace Rocket with actix-web (#153)

Signed-off-by: Jérémie Drouet <jeremie.drouet@gmail.com>
This commit is contained in:
Jérémie Drouet 2021-07-06 19:00:16 +02:00 committed by GitHub
parent 0ae9d4cea7
commit a13fabe604
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 114 additions and 172 deletions

View File

@ -7,17 +7,11 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
diesel_migrations = "1.4.0" actix-web = "4.0.0-beta.8"
rocket = "0.4.4" deadpool-postgres = "0.9.0"
env_logger = "^0.8"
log = "^0.4"
serde = "1.0" serde = "1.0"
serde_json = "1.0" serde_json = "1.0"
serde_derive = "1.0" tokio-postgres = "^0.7"
tokio-postgres-migration = "^0.1"
[dependencies.diesel]
version = "1.0.0"
features = ["postgres"]
[dependencies.rocket_contrib]
version = "0.4.4"
default-features = false
features = ["json", "diesel_postgres_pool"]

View File

@ -1,8 +1,4 @@
FROM jdrouet/rust-nightly:buster-slim AS base FROM rust:buster AS base
RUN apt-get update \
&& apt-get install -y libpq-dev \
&& rm -rf /var/lib/apt/lists/*
ENV USER=root ENV USER=root
ENV ROCKET_ADDRESS=0.0.0.0 ENV ROCKET_ADDRESS=0.0.0.0

View File

@ -1,5 +0,0 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"

View File

@ -1,6 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();

View File

@ -1,36 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
-- Installs a BEFORE UPDATE trigger named `set_updated_at` on the given
-- table; the trigger invokes diesel_set_updated_at() for each updated row.
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
-- Dynamic SQL because the target table name is only known at call time.
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
-- Trigger function: stamps NEW.updated_at with the current timestamp when
-- the row actually changed AND the statement did not set updated_at itself
-- (so explicit updates to the column are preserved).
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

View File

@ -1,4 +1,4 @@
# Backend # Backend
This backend is made with Rust using [Rocket](https://rocket.rs/) as a web server and [Diesel](https://diesel.rs/) as an ORM. This backend is made with Rust using [actix-web](https://actix.rs/) as a web server and [deadpool-postgres](https://crates.io/crates/deadpool-postgres) as a connection manager.

View File

@ -1,96 +1,45 @@
#![feature(proc_macro_hygiene, decl_macro)] use actix_web::{get, web, App, HttpResponse, HttpServer};
use deadpool_postgres::Pool;
#[macro_use] mod postgres;
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
#[macro_use]
extern crate rocket;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate rocket_contrib;
mod schema;
mod user; mod user;
use rocket::config::{Config, Environment, Value}; #[get("/users")]
use rocket::fairing::AdHoc; async fn list_users(pool: web::Data<Pool>) -> HttpResponse {
use rocket_contrib::json::Json; let client = match pool.get().await {
use std::collections::HashMap; Ok(client) => client,
use std::env; Err(err) => {
log::debug!("unable to get postgres client: {:?}", err);
// This macro from `diesel_migrations` defines an `embedded_migrations` module return HttpResponse::InternalServerError().json("unable to get postgres client");
// containing a function named `run`. This allows the example to be run and }
// tested without any outside setup of the database. };
embed_migrations!(); match user::User::all(&**client).await {
Ok(list) => HttpResponse::Ok().json(list),
#[database("my_db")] Err(err) => {
struct MyDBConn(diesel::PgConnection); log::debug!("unable to fetch users: {:?}", err);
return HttpResponse::InternalServerError().json("unable to fetch users");
#[derive(Serialize)] }
struct HelloMessage { }
message: String,
} }
#[get("/")] fn address() -> String {
fn index(conn: MyDBConn) -> Json<HelloMessage> { std::env::var("ADDRESS").unwrap_or_else(|_| "127.0.0.1:8000".into())
let result = match user::User::all(&*conn) { }
Ok(res) => res.len(),
Err(_) => 0,
};
Json(HelloMessage { #[actix_web::main]
message: format!("Hello with {} users", result), async fn main() -> std::io::Result<()> {
env_logger::init();
let pg_pool = postgres::create_pool();
postgres::migrate_up(&pg_pool).await;
let address = address();
HttpServer::new(move || {
App::new()
.app_data(web::Data::new(pg_pool.clone()))
.service(list_users)
}) })
} .bind(&address)?
.run()
fn get_config() -> Config { .await
let mut database_config = HashMap::new();
let mut databases = HashMap::new();
let env_address = env::var("ROCKET_ADDRESS")
.or::<String>(Ok(String::from("localhost")))
.unwrap();
let env_mode = env::var("ROCKET_ENV")
.or(Ok(String::from("development")))
.and_then(|value| value.parse::<Environment>())
.unwrap();
let database_url = match env::var("DATABASE_URL") {
Ok(value) => value,
Err(_) => String::from("postgres://localhost/postgres"),
};
database_config.insert("url", Value::from(database_url));
databases.insert("my_db", Value::from(database_config));
let config = Config::build(env_mode)
.address(env_address)
.extra("databases", databases)
.finalize()
.unwrap();
config
}
fn run_db_migrations(r: rocket::Rocket) -> Result<rocket::Rocket, rocket::Rocket> {
let conn = MyDBConn::get_one(&r).expect("database connection");
match embedded_migrations::run(&*conn) {
Ok(()) => Ok(r),
Err(e) => {
println!("Failed to run database migrations: {:?}", e);
Err(r)
}
}
}
fn main() {
let config = get_config();
rocket::custom(config)
.attach(MyDBConn::fairing())
.attach(AdHoc::on_attach("Database Migrations", run_db_migrations))
.mount("/", routes![index])
.launch();
} }

View File

@ -0,0 +1,40 @@
use deadpool_postgres::{Config, Pool};
use tokio_postgres::NoTls;
use tokio_postgres_migration::Migration;
// (name, SQL) pairs for every "up" migration, embedded into the binary at
// compile time via include_str! so no migration files are needed at runtime.
const SCRIPTS_UP: [(&str, &str); 1] = [(
    "0001_create-users",
    include_str!("../migrations/0001_create-users_up.sql"),
)];
/// Builds a deadpool-postgres `Config` from the `PG_HOST`, `PG_DBNAME`,
/// `PG_USER` and `PG_PASSWORD` environment variables.
///
/// A field is only assigned when its variable is set (and valid unicode);
/// otherwise it keeps the default produced by `Config::new()`.
fn create_config() -> Config {
    // Collapse "unset" and "not valid unicode" into `None`.
    let env = |key: &str| std::env::var(key).ok();
    let mut cfg = Config::new();
    if let Some(host) = env("PG_HOST") {
        cfg.host = Some(host);
    }
    if let Some(dbname) = env("PG_DBNAME") {
        cfg.dbname = Some(dbname);
    }
    if let Some(user) = env("PG_USER") {
        cfg.user = Some(user);
    }
    if let Some(password) = env("PG_PASSWORD") {
        cfg.password = Some(password);
    }
    cfg
}
/// Creates the application's postgres connection pool from the `PG_*`
/// environment configuration.
///
/// # Panics
/// Panics when the pool cannot be constructed — intended to be called once
/// at startup, where aborting is the right response.
pub fn create_pool() -> Pool {
    let config = create_config();
    config
        .create_pool(NoTls)
        .expect("couldn't create postgres pool")
}
/// Runs all pending "up" migration scripts (`SCRIPTS_UP`) against a client
/// checked out of `pool`.
///
/// # Panics
/// Panics if no client can be obtained or if a migration fails — intended
/// to be called once at startup, before the HTTP server begins serving.
pub async fn migrate_up(pool: &Pool) {
    let mut client = pool.get().await.expect("couldn't get postgres client");
    // "migrations" is the name of the bookkeeping table the library uses
    // to record which scripts have already run.
    let migration = Migration::new("migrations".to_string());
    migration
        // NOTE(review): `&mut **client` appears to peel the pool wrapper
        // down to the raw tokio-postgres client — confirm against the
        // deadpool `Object` deref chain.
        .up(&mut **client, &SCRIPTS_UP)
        .await
        .expect("couldn't run migrations");
}

View File

@ -1,6 +0,0 @@
// Diesel schema DSL: declares the `users` table (primary key `id`) so
// queries can be written against `users::table` with typed columns.
table! {
    users (id) {
        id -> Int4,
        login -> Text,
    }
}

View File

@ -1,18 +1,25 @@
#![allow(proc_macro_derive_resolution_fallback)] use tokio_postgres::{Error, GenericClient, Row};
use diesel; #[derive(Debug, serde::Serialize)]
use diesel::prelude::*;
use super::schema::users;
#[derive(Queryable, AsChangeset, Serialize, Deserialize)]
#[table_name = "users"]
pub struct User { pub struct User {
pub id: i32, pub id: i32,
pub login: String, pub login: String,
} }
// Maps a result row onto `User` by column position: 0 = id, 1 = login.
// The order must match the `SELECT id, login FROM users` query in
// `User::all`; `row.get` panics on a type or index mismatch.
impl From<Row> for User {
    fn from(row: Row) -> Self {
        Self {
            id: row.get(0),
            login: row.get(1),
        }
    }
}
impl User { impl User {
pub fn all(connection: &PgConnection) -> QueryResult<Vec<User>> { pub async fn all<C: GenericClient>(client: &C) -> Result<Vec<User>, Error> {
users::table.load::<User>(&*connection) let stmt = client.prepare("SELECT id, login FROM users").await?;
let rows = client.query(&stmt, &[]).await?;
Ok(rows.into_iter().map(User::from).collect())
} }
} }

View File

@ -10,12 +10,18 @@ services:
- 3000:3000 - 3000:3000
volumes: volumes:
- ./frontend/src:/code/src:ro - ./frontend/src:/code/src:ro
backend: backend:
build: build:
context: backend context: backend
target: development target: development
environment: environment:
- DATABASE_URL=postgres://postgres:mysecretpassword@db/postgres - ADDRESS=0.0.0.0:8000
- RUST_LOG=debug
- PG_DBNAME=postgres
- PG_HOST=db
- PG_USER=postgres
- PG_PASSWORD=mysecretpassword
networks: networks:
- client-side - client-side
- server-side - server-side
@ -24,6 +30,7 @@ services:
- backend-cache:/code/target - backend-cache:/code/target
depends_on: depends_on:
- db - db
db: db:
image: postgres:12-alpine image: postgres:12-alpine
restart: always restart: always
@ -35,9 +42,11 @@ services:
- 5432:5432 - 5432:5432
volumes: volumes:
- db-data:/var/lib/postgresql/data - db-data:/var/lib/postgresql/data
networks: networks:
client-side: {} client-side: {}
server-side: {} server-side: {}
volumes: volumes:
backend-cache: {} backend-cache: {}
db-data: {} db-data: {}

View File

@ -5,9 +5,9 @@ import "./App.css";
function App() { function App() {
const [message, setMessage] = useState(); const [message, setMessage] = useState();
useEffect(() => { useEffect(() => {
fetch("/api/") fetch("/api/users")
.then(res => res.json()) .then((res) => res.json())
.then(res => setMessage(res.message)) .then((res) => setMessage(res.message))
.catch(console.error); .catch(console.error);
}, [setMessage]); }, [setMessage]);
return ( return (