Multi-provider object storage support (replaces the MinIO-specific client with a generic S3-compatible client)

This commit is contained in:
Sean Morley
2024-06-10 00:46:01 +00:00
parent ee32a446e9
commit a535072224
6 changed files with 1323 additions and 392 deletions

View File

@@ -1,17 +0,0 @@
// Legacy MinIO client singleton, configured entirely from environment variables.
import { Client } from "minio";
// Hostname of the MinIO server (host only, no port); falls back to "localhost" below.
const MINIO_SERVER_URL = process.env.MINIO_SERVER_URL;
const MINIO_ACCESS_KEY = process.env.MINIO_ACCESS_KEY;
const MINIO_SECRET_KEY = process.env.MINIO_SECRET_KEY;
// URL whose trailing ":<port>" segment supplies the client port (e.g. "http://host:9000").
const MINIO_CLIENT_URL = process.env.MINIO_CLIENT_URL;
// String flag: only the literal string "true" enables SSL.
const MINIO_USE_SSL = process.env.MINIO_USE_SSL;
const port = MINIO_CLIENT_URL?.split(":").pop(); // 9000
const minioClient = new Client({
  endPoint: MINIO_SERVER_URL ? MINIO_SERVER_URL : "localhost",
  port: port ? parseInt(port) : 9000,
  useSSL: MINIO_USE_SSL ? MINIO_USE_SSL === "true" : false,
  // NOTE(review): assumed to be set in the environment; `as string` hides a
  // possible undefined here — TODO confirm deployment always provides these.
  accessKey: MINIO_ACCESS_KEY as string,
  secretKey: MINIO_SECRET_KEY as string,
});
export default minioClient;

117
src/lib/server/s3.ts Normal file
View File

@@ -0,0 +1,117 @@
import {
CreateBucketCommand,
HeadBucketCommand,
PutBucketPolicyCommand,
PutObjectCommand,
S3Client,
type S3ClientConfig,
} from "@aws-sdk/client-s3";
import { env } from "$env/dynamic/private";
// Provider-agnostic S3 client: points at AWS S3 or any S3-compatible service
// (GCS, DigitalOcean Spaces, Supabase Storage, MinIO) selected via AWS_S3_ENDPOINT.
// SECURITY: the previous version logged AWS_ACCESS_KEY_ID at import time;
// credentials must never be written to logs, so that debug line is removed.
const s3Config: S3ClientConfig = {
  region: env.AWS_REGION as string,
  credentials: {
    accessKeyId: env.AWS_ACCESS_KEY_ID as string,
    secretAccessKey: env.AWS_SECRET_ACCESS_KEY as string,
  },
  // Custom endpoint lets this client talk to non-AWS S3-compatible providers.
  endpoint: env.AWS_S3_ENDPOINT,
  // Path-style addressing (https://host/bucket/key) is required by most
  // S3-compatible providers.
  forcePathStyle: true,
};
export const s3Client = new S3Client(s3Config);
export const ensureBucketExists = async (bucketName: string): Promise<void> => {
const headBucketCommand = new HeadBucketCommand({ Bucket: bucketName });
try {
await s3Client.send(headBucketCommand);
console.log(`Bucket ${bucketName} already exists.`);
} catch (error: any) {
if (error.$metadata.httpStatusCode === 404) {
console.log(`Bucket ${bucketName} does not exist. Creating...`);
const createBucketCommand = new CreateBucketCommand({
Bucket: bucketName,
});
await s3Client.send(createBucketCommand);
// Set a bucket policy to allow public read access
const bucketPolicy = {
Version: "2012-10-17",
Statement: [
{
Effect: "Allow",
Principal: "*", // This allows anyone (public)
Action: ["s3:GetBucketLocation", "s3:ListBucket"],
Resource: `arn:aws:s3:::${bucketName}`,
},
{
Effect: "Allow",
Principal: "*", // This allows anyone (public)
Action: "s3:GetObject",
Resource: `arn:aws:s3:::${bucketName}/*`,
},
],
};
const putBucketPolicyCommand = new PutBucketPolicyCommand({
Bucket: bucketName,
Policy: JSON.stringify(bucketPolicy),
});
await s3Client.send(putBucketPolicyCommand);
console.log(
`Bucket ${bucketName} created and public read access policy set.`
);
} else {
throw error; // Rethrow other errors
}
}
};
/**
 * Upload `fileBuffer` to `bucketName` under key `fileName` and return a
 * public URL for the object. The URL shape is chosen per provider by
 * inspecting the AWS_S3_ENDPOINT environment variable.
 *
 * @param bucketName - Target bucket.
 * @param fileName - Object key to store the file under.
 * @param fileBuffer - Raw file contents.
 * @returns Publicly accessible URL of the uploaded object.
 * @throws Error when AWS_S3_ENDPOINT is unset; rethrows any upload failure
 *         after logging it with context.
 */
export const uploadObject = async (
  bucketName: string,
  fileName: string,
  fileBuffer: Buffer
): Promise<string> => {
  const putObjectCommand = new PutObjectCommand({
    Bucket: bucketName,
    Key: fileName,
    Body: fileBuffer,
  });
  try {
    await s3Client.send(putObjectCommand);
    let endpoint = env.AWS_S3_ENDPOINT as string;
    // Previously an unset AWS_S3_ENDPOINT crashed with an opaque TypeError on
    // `endpoint.includes`; fail with an explicit, actionable error instead.
    if (!endpoint) {
      throw new Error("AWS_S3_ENDPOINT is not set; cannot build object URL");
    }
    let objectUrl: string;
    if (endpoint.includes("amazonaws.com")) {
      // Amazon S3 (virtual-hosted-style URL)
      objectUrl = `https://${bucketName}.s3.${env.AWS_REGION}.amazonaws.com/${fileName}`;
    } else if (endpoint.includes("storage.googleapis.com")) {
      // Google Cloud Storage
      objectUrl = `https://storage.googleapis.com/${bucketName}/${fileName}`;
    } else if (endpoint.includes("digitaloceanspaces.com")) {
      // DigitalOcean Spaces
      objectUrl = `https://${bucketName}.${endpoint}/${fileName}`;
    } else if (endpoint.includes("supabase.co")) {
      // Supabase Storage serves public objects from the
      // ".../object/public/..." path, so swap the first "s3" path segment.
      // NOTE(review): replaces only the first occurrence of "s3" anywhere in
      // the endpoint string — verify the endpoint never contains "s3" earlier.
      endpoint = endpoint.replace("s3", "object/public");
      objectUrl = `${endpoint}/${bucketName}/${fileName}`;
    } else {
      // Generic S3-compatible fallback (path-style URL).
      objectUrl = `${endpoint}/${bucketName}/${fileName}`;
    }
    return objectUrl;
  } catch (error: unknown) {
    console.error(
      `Error uploading file ${fileName} to bucket ${bucketName}:`,
      error
    );
    throw error;
  }
};