Rust Development Patterns

Master Rust development with Cursor IDE and Claude Code. This guide covers ownership patterns, async programming with Tokio, web frameworks like Actix and Axum, error handling strategies, and building safe, concurrent systems with AI assistance.

  1. Initialize Rust Project

    Terminal window
    # Create new Rust project
    Ask: "Create a new Rust project with workspace structure for a web API"
    # Or use Agent mode
    Agent: "Set up a Rust web service with Axum, SQLx, and Tokio with proper error handling"
  2. Configure Development Environment

    Cargo.toml
    [workspace]
    members = ["api", "core", "db"]
    [workspace.dependencies]
    tokio = { version = "1.35", features = ["full"] }
    serde = { version = "1.0", features = ["derive"] }
    thiserror = "1.0"
    anyhow = "1.0"
    tracing = "0.1"
  3. Set Up AI Rules

    .cursorrules or CLAUDE.md
    - Use Rust 2021 edition
    - Prefer Result<T, E> over panics
    - Use thiserror for error types
    - Apply clippy lints
    - Write idiomatic Rust code
    - Add comprehensive documentation
    - Use async/await for I/O operations
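
The workspace manifest in step 2 pins each dependency once; member crates then inherit those entries. A sketch of what the `api` member's manifest might look like (the exact crate list is an assumption based on step 2):

api/Cargo.toml
[package]
name = "api"
version = "0.1.0"
edition = "2021"

[dependencies]
# Inherit version and features from [workspace.dependencies]
tokio = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
tracing = { workspace = true }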
// AI Prompt
Ask: "Explain and fix the ownership issues in this code:
fn process_data(data: Vec<String>) -> Vec<String> {
    let filtered = data.into_iter()
        .filter(|s| s.len() > 5)
        .collect();
    // Want to use 'data' again here
    println!('Original: {:?}', data);
    filtered
}"

// AI will explain ownership transfer and suggest:
fn process_data(data: &[String]) -> Vec<String> {
    let filtered: Vec<String> = data.iter()
        .filter(|s| s.len() > 5)
        .cloned()
        .collect();
    println!("Original: {:?}", data);
    filtered
}

// Or with owned data:
fn process_data(data: Vec<String>) -> (Vec<String>, Vec<String>) {
    let filtered = data.iter()
        .filter(|s| s.len() > 5)
        .cloned()
        .collect();
    println!("Original: {:?}", data);
    (data, filtered)
}
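
When the caller only needs to read the matching entries, a third option avoids cloning entirely by returning references tied to the input's lifetime. This is a sketch of mine, not one of the AI's suggested fixes above, and the function name is hypothetical:

// Borrow instead of clone: the returned Vec holds references into `data`,
// so it cannot outlive the original slice.
fn long_strings(data: &[String]) -> Vec<&String> {
    data.iter()
        .filter(|s| s.len() > 5)
        .collect()
}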
// AI Prompt: "Implement comprehensive error handling with custom error types"
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AppError {
#[error("Database error: {0}")]
Database(#[from] sqlx::Error),
#[error("Validation error: {message}")]
Validation { message: String },
#[error("Not found: {resource}")]
NotFound { resource: String },
#[error("Unauthorized")]
Unauthorized,
#[error("Internal server error")]
Internal(#[source] anyhow::Error),
}
// Result type alias
pub type Result<T> = std::result::Result<T, AppError>;
// Usage in functions
pub async fn get_user(id: i64, pool: &PgPool) -> Result<User> {
let user = sqlx::query_as!(User,
"SELECT * FROM users WHERE id = $1",
id
)
.fetch_optional(pool)
.await?
.ok_or_else(|| AppError::NotFound {
resource: format!("User {}", id),
})?;
Ok(user)
}
// Error conversion for web frameworks
impl From<AppError> for actix_web::Error {
fn from(err: AppError) -> Self {
match err {
AppError::NotFound { .. } => {
actix_web::error::ErrorNotFound(err)
}
AppError::Unauthorized => {
actix_web::error::ErrorUnauthorized(err)
}
AppError::Validation { .. } => {
actix_web::error::ErrorBadRequest(err)
}
_ => actix_web::error::ErrorInternalServerError(err),
}
}
}
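
The Axum handlers later in this guide return Result<impl IntoResponse> with this same AppError, which requires the error type itself to implement IntoResponse. A minimal sketch of that conversion (assuming axum 0.7):

use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};

impl IntoResponse for AppError {
    fn into_response(self) -> Response {
        let status = match &self {
            AppError::NotFound { .. } => StatusCode::NOT_FOUND,
            AppError::Unauthorized => StatusCode::UNAUTHORIZED,
            AppError::Validation { .. } => StatusCode::BAD_REQUEST,
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        };
        // The Display impl derived via thiserror becomes the response body
        (status, self.to_string()).into_response()
    }
}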
// AI Prompt
Agent: "Create an async service with:
- Tokio runtime configuration
- Concurrent task management
- Graceful shutdown
- Channel communication
- Error propagation"

use anyhow::anyhow;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinSet;
use tracing::{error, info};

// `Task` and `process_task` are application-specific and defined elsewhere.
pub struct Service {
    shutdown_tx: Option<oneshot::Sender<()>>,
    task_tx: Option<mpsc::Sender<Task>>,
    task_handle: Option<tokio::task::JoinHandle<()>>,
}

impl Service {
    pub fn new() -> Self {
        Self {
            shutdown_tx: None,
            task_tx: None,
            task_handle: None,
        }
    }

    pub async fn start(&mut self) -> Result<()> {
        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
        let (task_tx, mut task_rx) = mpsc::channel::<Task>(100);
        self.shutdown_tx = Some(shutdown_tx);
        // Keep the sender so callers can submit work to the worker loop
        self.task_tx = Some(task_tx);

        let handle = tokio::spawn(async move {
            let mut tasks = JoinSet::new();
            loop {
                tokio::select! {
                    Some(task) = task_rx.recv() => {
                        tasks.spawn(async move {
                            process_task(task).await
                        });
                    }
                    _ = &mut shutdown_rx => {
                        info!("Shutting down service");
                        break;
                    }
                }
            }
            // Wait for all in-flight tasks to complete
            while let Some(result) = tasks.join_next().await {
                if let Err(e) = result {
                    error!("Task failed: {}", e);
                }
            }
        });
        self.task_handle = Some(handle);
        Ok(())
    }

    pub async fn submit(&self, task: Task) -> Result<()> {
        let tx = self.task_tx.as_ref()
            .ok_or_else(|| AppError::Internal(anyhow!("service not started")))?;
        tx.send(task).await
            .map_err(|e| AppError::Internal(anyhow!("worker stopped: {e}")))?;
        Ok(())
    }

    pub async fn shutdown(mut self) -> Result<()> {
        if let Some(tx) = self.shutdown_tx.take() {
            let _ = tx.send(());
        }
        if let Some(handle) = self.task_handle.take() {
            handle.await.map_err(|e| AppError::Internal(e.into()))?;
        }
        Ok(())
    }
}
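
A usage sketch wiring the service to Ctrl-C for graceful shutdown. It assumes the tracing-subscriber crate is a dependency, and the `Task::default()` value is hypothetical:

#[tokio::main]
async fn main() -> Result<()> {
    // Install a subscriber so the info!/error! calls above are visible
    tracing_subscriber::fmt::init();

    let mut service = Service::new();
    service.start().await?;
    service.submit(Task::default()).await?; // hypothetical Task value

    // Wait for Ctrl-C, then drain in-flight work before exiting
    tokio::signal::ctrl_c()
        .await
        .map_err(|e| AppError::Internal(e.into()))?;
    service.shutdown().await?;
    Ok(())
}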
// AI Prompt: "Implement thread-safe shared state with async RwLock"
use std::sync::Arc;
use tokio::sync::RwLock;
use dashmap::DashMap;
#[derive(Clone)]
pub struct Cache {
data: Arc<DashMap<String, CachedItem>>,
config: Arc<RwLock<CacheConfig>>,
}
#[derive(Clone)]
struct CachedItem {
value: String,
expires_at: Instant,
}
impl Cache {
pub fn new(config: CacheConfig) -> Self {
Self {
data: Arc::new(DashMap::new()),
config: Arc::new(RwLock::new(config)),
}
}
pub async fn get(&self, key: &str) -> Option<String> {
let item = self.data.get(key)?;
if item.expires_at > Instant::now() {
Some(item.value.clone())
} else {
// Remove expired item
drop(item);
self.data.remove(key);
None
}
}
pub async fn set(&self, key: String, value: String) -> Result<()> {
let config = self.config.read().await;
let expires_at = Instant::now() + config.ttl;
self.data.insert(key, CachedItem {
value,
expires_at,
});
Ok(())
}
pub async fn update_config(&self, new_config: CacheConfig) -> Result<()> {
let mut config = self.config.write().await;
*config = new_config;
Ok(())
}
}
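
A small usage sketch of the cache above, written as a Tokio test:

#[tokio::test]
async fn cache_round_trip() -> Result<()> {
    let cache = Cache::new(CacheConfig { ttl: Duration::from_secs(60) });
    cache.set("session:42".to_string(), "alice".to_string()).await?;
    assert_eq!(cache.get("session:42").await, Some("alice".to_string()));
    Ok(())
}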
// AI Prompt
Agent: "Create a production Axum API with:
- JWT authentication
- Request validation
- Database connection pool
- Middleware stack
- OpenAPI documentation"

use axum::{
    Json, Router,
    extract::{Path, Query, State},
    http::StatusCode,
    middleware,
    response::IntoResponse,
    routing::get,
};
use tower::ServiceBuilder;
use tower_http::{
    compression::CompressionLayer,
    cors::CorsLayer,
    trace::TraceLayer,
};
use validator::Validate;

#[derive(Clone)]
pub struct AppState {
    db: PgPool,
    redis: RedisPool,
    config: Config,
}

pub fn create_app(state: AppState) -> Router {
    let api_routes = Router::new()
        .route("/users", get(list_users).post(create_user))
        .route("/users/:id", get(get_user).put(update_user).delete(delete_user))
        .route_layer(middleware::from_fn_with_state(state.clone(), auth_middleware));

    // Handlers use the State extractor, so register the state with .with_state
    Router::new()
        .route("/health", get(health_check))
        .nest("/api/v1", api_routes)
        .layer(
            ServiceBuilder::new()
                .layer(TraceLayer::new_for_http())
                .layer(CorsLayer::permissive())
                .layer(CompressionLayer::new()),
        )
        .with_state(state)
}

async fn create_user(
    State(state): State<AppState>,
    Json(payload): Json<CreateUserRequest>,
) -> Result<impl IntoResponse> {
    // Validate request
    payload
        .validate()
        .map_err(|e| AppError::Validation { message: e.to_string() })?;

    // Create user in database
    let user = sqlx::query_as!(
        User,
        r#"
        INSERT INTO users (email, name, password_hash)
        VALUES ($1, $2, $3)
        RETURNING id, email, name, created_at
        "#,
        payload.email,
        payload.name,
        hash_password(&payload.password)?
    )
    .fetch_one(&state.db)
    .await?;

    Ok((StatusCode::CREATED, Json(user)))
}
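
The router above references an auth_middleware that the prompt asks for but the snippet never defines. A sketch of what it might look like using the jsonwebtoken crate (assuming axum 0.7; the Claims fields and state.config.jwt_secret are assumptions about the app's configuration):

use axum::{
    extract::{Request, State},
    http::{header, StatusCode},
    middleware::Next,
    response::Response,
};
use jsonwebtoken::{decode, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Deserialize)]
struct Claims {
    sub: String, // assumed claim set
    exp: usize,
}

async fn auth_middleware(
    State(state): State<AppState>,
    req: Request,
    next: Next,
) -> Result<Response, StatusCode> {
    // Expect "Authorization: Bearer <token>"
    let token = req
        .headers()
        .get(header::AUTHORIZATION)
        .and_then(|value| value.to_str().ok())
        .and_then(|value| value.strip_prefix("Bearer "))
        .ok_or(StatusCode::UNAUTHORIZED)?;

    // `jwt_secret` on Config is an assumption, not from the original guide
    decode::<Claims>(
        token,
        &DecodingKey::from_secret(state.config.jwt_secret.as_bytes()),
        &Validation::default(),
    )
    .map_err(|_| StatusCode::UNAUTHORIZED)?;

    Ok(next.run(req).await)
}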
// AI Prompt: "Implement database repository with SQLx and compile-time verification"
use sqlx::{PgPool, postgres::PgQueryResult};
pub struct UserRepository {
pool: PgPool,
}
impl UserRepository {
pub fn new(pool: PgPool) -> Self {
Self { pool }
}
pub async fn find_by_email(&self, email: &str) -> Result<Option<User>> {
let user = sqlx::query_as!(
User,
r#"
SELECT id, email, name, password_hash,
created_at, updated_at
FROM users
WHERE email = $1
"#,
email
)
.fetch_optional(&self.pool)
.await?;
Ok(user)
}
pub async fn create(&self, user: &CreateUser) -> Result<User> {
let rec = sqlx::query!(
r#"
INSERT INTO users (email, name, password_hash)
VALUES ($1, $2, $3)
RETURNING id, created_at, updated_at
"#,
user.email,
user.name,
user.password_hash
)
.fetch_one(&self.pool)
.await?;
Ok(User {
id: rec.id,
email: user.email.clone(),
name: user.name.clone(),
password_hash: user.password_hash.clone(),
created_at: rec.created_at,
updated_at: rec.updated_at,
})
}
pub async fn update(&self, id: i64, update: &UpdateUser) -> Result<User> {
let user = sqlx::query_as!(
User,
r#"
UPDATE users
SET name = COALESCE($1, name),
email = COALESCE($2, email),
updated_at = NOW()
WHERE id = $3
RETURNING id, email, name, password_hash,
created_at, updated_at
"#,
update.name,
update.email,
id
)
.fetch_one(&self.pool)
.await?;
Ok(user)
}
}
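
The query! and query_as! macros are checked against a live schema at compile time, so the build needs either a DATABASE_URL pointing at a migrated database or query metadata cached by sqlx-cli. A terminal sketch (the database URL is a placeholder):

Terminal window
# Install sqlx-cli once (Postgres + rustls)
cargo install sqlx-cli --no-default-features --features rustls,postgres
# Point the macros at a migrated database...
export DATABASE_URL=postgres://localhost/myapp
# ...or cache query metadata so builds work offline
cargo sqlx prepare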
// AI Prompt
Ask: "Generate comprehensive tests with:
- Unit tests with mocking
- Integration tests with test database
- Property-based tests
- Async test helpers
- Test fixtures"

#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use mockall::predicate::*;
    use proptest::prelude::*;

    // Mock trait for testing
    #[mockall::automock]
    trait UserService {
        async fn get_user(&self, id: i64) -> Result<User>;
        async fn create_user(&self, user: CreateUser) -> Result<User>;
    }

    #[tokio::test]
    async fn test_create_user_success() {
        let mut mock = MockUserService::new();
        mock.expect_create_user()
            .with(eq(CreateUser {
                email: "test@example.com".to_string(),
                name: "Test User".to_string(),
            }))
            .times(1)
            .returning(|_| Ok(User {
                id: 1,
                email: "test@example.com".to_string(),
                name: "Test User".to_string(),
                created_at: Utc::now(),
            }));

        let result = mock.create_user(CreateUser {
            email: "test@example.com".to_string(),
            name: "Test User".to_string(),
        }).await;

        assert!(result.is_ok());
        let user = result.unwrap();
        assert_eq!(user.email, "test@example.com");
    }

    // Integration test with a real database (provisioned by #[sqlx::test])
    #[sqlx::test]
    async fn test_repository_create_user(pool: PgPool) {
        let repo = UserRepository::new(pool);
        let user = repo.create(&CreateUser {
            email: "integration@test.com".to_string(),
            name: "Integration Test".to_string(),
            password_hash: "hashed".to_string(),
        }).await.unwrap();
        assert_eq!(user.email, "integration@test.com");

        // Verify the user can be read back
        let found = repo.find_by_email("integration@test.com")
            .await
            .unwrap()
            .expect("User should exist");
        assert_eq!(found.id, user.id);
    }

    // Property-based testing
    proptest! {
        #[test]
        fn test_password_hash_properties(password in "[a-zA-Z0-9]{8,32}") {
            let hash = hash_password(&password).unwrap();
            prop_assert!(verify_password(&password, &hash).unwrap());
            prop_assert_ne!(password, hash);
            prop_assert!(hash.len() > password.len());
        }
    }
}
// AI Prompt: "Create test helpers for database and HTTP testing"
pub mod test_helpers {
use once_cell::sync::Lazy;
use sqlx::PgPool;
static TEST_DB: Lazy<String> = Lazy::new(|| {
format!("test_db_{}", uuid::Uuid::new_v4())
});
pub async fn setup_test_db() -> PgPool {
let db_url = std::env::var("DATABASE_URL")
.unwrap_or_else(|_| "postgres://localhost/postgres".to_string());
// Create test database
let mut conn = PgConnection::connect(&db_url).await.unwrap();
sqlx::query(&format!("CREATE DATABASE {}", *TEST_DB))
.execute(&mut conn)
.await
.unwrap();
// Run migrations
let pool = PgPool::connect(&format!("{}/{}", db_url, *TEST_DB))
.await
.unwrap();
sqlx::migrate!("./migrations")
.run(&pool)
.await
.unwrap();
pool
}
pub async fn teardown_test_db(pool: PgPool) {
pool.close().await;
let db_url = std::env::var("DATABASE_URL")
.unwrap_or_else(|_| "postgres://localhost/postgres".to_string());
let mut conn = PgConnection::connect(&db_url).await.unwrap();
sqlx::query(&format!("DROP DATABASE IF EXISTS {}", *TEST_DB))
.execute(&mut conn)
.await
.unwrap();
}
// Test data factory
pub struct UserFactory;
impl UserFactory {
pub fn build() -> CreateUser {
CreateUser {
email: format!("user{}@test.com", rand::random::<u32>()),
name: "Test User".to_string(),
password: "password123".to_string(),
}
}
pub async fn create(pool: &PgPool) -> User {
let user_data = Self::build();
let repo = UserRepository::new(pool.clone());
repo.create(&user_data).await.unwrap()
}
}
}
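
A hypothetical test tying the helpers together (the test name is mine, not from the guide):

#[tokio::test]
async fn user_factory_round_trip() {
    let pool = test_helpers::setup_test_db().await;
    let user = test_helpers::UserFactory::create(&pool).await;

    let found = UserRepository::new(pool.clone())
        .find_by_email(&user.email)
        .await
        .unwrap()
        .expect("factory-created user should be readable");
    assert_eq!(found.id, user.id);

    test_helpers::teardown_test_db(pool).await;
}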
// AI Prompt: "Optimize this code for zero-copy operations and memory efficiency"
use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncReadExt};
// Efficient buffer management
pub struct StreamProcessor {
buffer: Vec<u8>,
capacity: usize,
}
impl StreamProcessor {
pub fn new(capacity: usize) -> Self {
Self {
buffer: Vec::with_capacity(capacity),
capacity,
}
}
pub async fn process_stream<R: AsyncRead + Unpin>(
&mut self,
mut reader: R,
) -> Result<ProcessedData> {
// Reuse buffer
self.buffer.clear();
// Read efficiently
let mut total_bytes = 0;
loop {
// Read directly into buffer
let n = reader.read_buf(&mut self.buffer).await?;
if n == 0 {
break;
}
// Process without copying
self.process_chunk(&self.buffer[total_bytes..total_bytes + n])?;
total_bytes += n;
// Prevent unbounded growth
if self.buffer.len() > self.capacity {
self.buffer.truncate(self.capacity);
}
}
Ok(ProcessedData {
bytes_processed: total_bytes
})
}
fn process_chunk(&self, chunk: &[u8]) -> Result<()> {
// Process without allocation
for window in chunk.windows(4) {
// Process 4-byte windows without copying
let value = u32::from_le_bytes(window.try_into()?);
// ... process value
}
Ok(())
}
}
// Zero-copy string handling
pub fn process_text(input: &str) -> Result<Vec<&str>> {
// Return slices instead of owned strings
Ok(input
.lines()
.filter(|line| !line.is_empty())
.map(|line| line.trim())
.collect())
}
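
A quick usage sketch: &[u8] implements AsyncRead, so an in-memory buffer works as a stand-in for a file or socket in a test.

#[tokio::test]
async fn stream_processor_counts_bytes() -> Result<()> {
    let input = b"some raw bytes to scan".as_slice();
    let mut processor = StreamProcessor::new(8 * 1024);
    let report = processor.process_stream(input).await?;
    assert_eq!(report.bytes_processed, input.len());
    Ok(())
}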

Rust Development Guidelines

  1. Safety First - Leverage Rust’s type system
  2. Error Handling - Use Result everywhere, avoid unwrap in production
  3. Documentation - Write comprehensive docs with examples
  4. Testing - Aim for high test coverage
  5. Performance - Profile before optimizing
  6. Idioms - Follow Rust API guidelines
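
Guideline 2 can also be enforced mechanically; one option (my suggestion, not part of the original checklist) is clippy's restriction lints at the crate root:

// At the top of lib.rs or main.rs: make clippy reject unwrap()/expect() calls.
#![deny(clippy::unwrap_used, clippy::expect_used)]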
// AI: "Implement builder pattern with validation"
#[derive(Default)]
pub struct ServerBuilder {
host: Option<String>,
port: Option<u16>,
workers: Option<usize>,
}
impl ServerBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn host(mut self, host: impl Into<String>) -> Self {
self.host = Some(host.into());
self
}
pub fn port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
pub fn workers(mut self, workers: usize) -> Self {
self.workers = Some(workers);
self
}
pub fn build(self) -> Result<Server> {
Ok(Server {
host: self.host.ok_or_else(|| anyhow!("host required"))?,
port: self.port.unwrap_or(8080),
workers: self.workers.unwrap_or_else(num_cpus::get),
})
}
}
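
A quick check of the builder's validation behavior (same module, so private fields stay accessible):

#[test]
fn builder_validates_required_fields() {
    // Missing host -> error; defaults fill in port and workers otherwise.
    assert!(ServerBuilder::new().port(3000).build().is_err());
    assert!(ServerBuilder::new().host("0.0.0.0").build().is_ok());
}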
# AI Prompt: "Create multi-stage Docker build for Rust with caching"
# Build stage
FROM rust:1.75 as builder
# Create app user
RUN useradd -m -u 1001 app
WORKDIR /usr/src/app
# Cache dependencies
COPY Cargo.toml Cargo.lock ./
RUN mkdir src && \
echo "fn main() {}" > src/main.rs && \
cargo build --release && \
rm -rf src
# Build application
COPY . .
RUN cargo build --release
# Runtime stage
FROM debian:bookworm-slim
# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Copy user from builder
COPY --from=builder /etc/passwd /etc/passwd
# Copy binary
COPY --from=builder /usr/src/app/target/release/app /usr/local/bin/app
USER app
EXPOSE 8080
CMD ["app"]