先用vite搭一个vue3项目
pnpm create vite
首先拿到上传的文件:通过 <input type="file"/> 的 change 事件拿到 File 文件对象。File 继承自 Blob,可以调用 Blob 的实例方法,然后用 slice 方法做分割;
// App.vue
<script setup>
import { ref } from "vue";
import { createChunks } from "./utils";

// Chunks produced from the currently selected file.
const fileChunks = ref([]);

function handleFileChange(e) {
  // The selected File object; File inherits slice() from Blob.
  const file = e.target.files[0];
  if (!file) {
    return;
  }
  fileChunks.value = createChunks(file);
  console.log(fileChunks.value);
}
</script>

<template>
  <input type="file" @change="handleFileChange" />
  <button>上传</button>
</template>

// utils.js
// 堆代码 duidaima.com
// Default chunk size: 3MB.
const CHUNK_SIZE = 3 * 1024 * 1024;

// Split a File/Blob into `size`-byte chunks (last chunk may be smaller).
export function createChunks(file, size = CHUNK_SIZE) {
  const chunks = [];
  let offset = 0;
  while (offset < file.size) {
    chunks.push(file.slice(offset, offset + size));
    offset += size;
  }
  return chunks;
}
2、计算hash
// App.vue
<script setup>
import { ref } from "vue";
import { createChunks, calculateFileHash } from "./utils";

const fileChunks = ref([]);

async function handleFileChange(e) {
  const file = e.target.files[0];
  if (!file) {
    return;
  }
  fileChunks.value = createChunks(file);
  const sT = Date.now();
  const hash = await calculateFileHash(fileChunks.value);
  // Log how long hashing took.
  console.log(Date.now() - sT);
}
</script>

// utils.js
import SparkMD5 from "spark-md5";

// Compute an MD5 hash over the full content of every chunk.
export function calculateFileHash(chunkList) {
  return new Promise((resolve) => {
    const spark = new SparkMD5.ArrayBuffer();
    // FileReader turns the combined Blob into an ArrayBuffer for SparkMD5.
    const reader = new FileReader();
    reader.readAsArrayBuffer(new Blob(chunkList));
    // Fires once the whole Blob has been read.
    reader.onload = (e) => {
      spark.append(e.target.result);
      resolve(spark.end());
    };
  });
}
上面calculateFileHash这个函数计算hash使用文件所有切片内容,如果文件很大,将会非常耗时,测试了一个526MB的文件,需要6813ms左右,为了保证所有切片都参与计算,也不至于太耗时,采取下面这种方式:
// utils.js
const CHUNK_SIZE = 3 * 1024 * 1024;

/**
 * Compute a sampled MD5 hash so every chunk contributes without reading
 * the whole file: first and last chunks are hashed in full; from every
 * other chunk only 2 bytes each from its head, middle and tail are used.
 * @param {Blob[]} chunkList - chunks produced by createChunks
 * @returns {Promise<string>} resolves with the hex MD5 digest
 */
export function calculateFileHash(chunkList) {
  return new Promise((resolve, reject) => {
    const spark = new SparkMD5.ArrayBuffer();
    const reader = new FileReader();
    // Collect the sampled sub-chunks.
    const chunks = [];
    for (let i = 0; i < chunkList.length; i++) {
      const chunk = chunkList[i];
      if (i === 0 || i === chunkList.length - 1) {
        // First and last chunks participate in full.
        chunks.push(chunk);
      } else {
        chunks.push(chunk.slice(0, 2));
        chunks.push(chunk.slice(CHUNK_SIZE / 2, CHUNK_SIZE / 2 + 2));
        chunks.push(chunk.slice(CHUNK_SIZE - 2, CHUNK_SIZE));
      }
    }
    reader.readAsArrayBuffer(new Blob(chunks));
    reader.onload = (e) => {
      spark.append(e.target.result);
      resolve(spark.end());
    };
    // BUG FIX: without an error handler the promise hung forever if the
    // read failed (e.g. the file changed on disk after selection).
    reader.onerror = () => reject(reader.error);
  });
}
再次传同一个文件测试,只需要975ms左右
// App.vue
<script setup>
import {
  createChunks,
  calculateFileHash,
  createFormData,
  concurrentChunksUpload,
} from "./utils";

// Hash the chunks, wrap each in FormData, then upload them concurrently.
async function uploadChunks() {
  const hash = await calculateFileHash(fileChunks.value);
  // 利用计算的文件hash构造formData
  const dataList = createFormData(fileChunks.value, hash);
  // 切片上传请求
  await concurrentChunksUpload(dataList);
}
</script>
<!-- BUG FIX: the closing tag was written as "</script" (missing ">"),
     which breaks SFC compilation. -->

<template>
  <input type="file" @change="handleFileChange" />
  <button @click="uploadChunks()">上传</button>
</template>
utils.js
const CHUNK_SIZE = 10 * 1024 * 1024; const BASE_URL = "http://localhost:2024"; // 根据切片的数量组装相同数量的formData export function createFormData(fileChunks, hash) { return fileChunks .map((chunk, index) => ({ fileHash: hash, chunkHash: `${hash}-${index}`, chunk, })) .map(({ fileHash, chunkHash, chunk }) => { const formData = new FormData(); formData.append("fileHash", fileHash); formData.append("chunkHash", chunkHash); formData.append(`chunk-${chunkHash}`, chunk); return formData; }); } // 默认最大同时发送6个请求 export function concurrentChunksUpload(dataList, max = 6) { return new Promise((resolve) => { if (dataList.length === 0) { resolve([]); return; } const dataLength = dataList.length; // 保存所有成功结果 const results = []; // 下一个请求 let next = 0; // 请求完成数量 let finished = 0; async function _request() { // next达到dataList个数,就停止 if (next === dataLength) { return; } const i = next; next++; const formData = dataList[i]; const url = `${BASE_URL}/upload-chunks`; try { const res = await axios.post(url, formData); results[i] = res.data; finished++; // 所有切片上传成功返回 if (finished === dataLength) { resolve(results); } _request(); } catch (err) { console.log(err); } } // 最大并发数如果大于formData个数,取最小数 const minTimes = Math.min(max, dataLength); for (let i = 0; i < minTimes; i++) { _request(); } }); }后端逻辑:
const path = require("path"); const fs = require("fs"); const Koa = require("koa"); const KoaRouter = require("@koa/router"); const cors = require("@koa/cors"); const { koaBody } = require("koa-body"); const app = new Koa(); const router = new KoaRouter(); // 保存切片目录 const chunksDir = path.resolve(__dirname, "../chunks"); //cors解决跨域 app.use(cors()); app.use(router.routes()).use(router.allowedMethods()); app.listen(2024, () => console.log("Koa文件服务器启动")); // 中间件:处理multipart/form-data,切片写入磁盘 const uploadKoaBody = koaBody({ multipart: true, formidable: { // 设置保存切片的文件夹 uploadDir: chunksDir, // 在保存到磁盘前回调 onFileBegin(name, file) { if (!fs.existsSync(chunksDir)) { fs.mkdirSync(chunksDir); } // 切片重命名 file.filepath = `${chunksDir}/${name}`; }, }, }); // 上传chunks切片接口 router.post("/upload-chunks", uploadKoaBody, (ctx) => { ctx.body = { code: 200, msg: "文件上传成功" }; });4、合并切片
// App.vue
<script setup>
import {
  createChunks,
  calculateFileHash,
  createFormData,
  concurrentChunksUpload,
  mergeChunks,
} from "./utils";

async function uploadChunks() {
  const hash = await calculateFileHash(fileChunks.value);
  // 利用计算的文件hash构造formData
  const dataList = createFormData(fileChunks.value, hash);
  // 切片上传请求
  await concurrentChunksUpload(dataList);
  // 等所有chunks发送完毕,发送合并请求
  mergeChunks(originFile.value.name);
}
</script>
<!-- BUG FIX: the closing tag was written as "</script" (missing ">"),
     which breaks SFC compilation. -->

<template>
  <input type="file" @change="handleFileChange" />
  <button @click="uploadChunks()">上传</button>
</template>
utils.js
export function mergeChunks(filename) { return axios.post(BASE_URL + "/merge-chunks", { filename, size: CHUNK_SIZE }); }后端逻辑:
// 合并chunks接口 router.post("/merge-chunks", koaBody(), async (ctx) => { const { filename, size } = ctx.request.body; await mergeChunks(filename, size); ctx.body = { code: 200, msg: "合并成功" }; }); // 合并 chunks async function mergeChunks(filename, size) { // 读取chunks目录中的文件名 const chunksName = fs.readdirSync(chunksDir); if (!chunksName.length) return; // 保证切片合并顺序 chunksName.sort((a, b) => a.split("-")[2] - b.split("-")[2]); // 提前创建要写入的static目录 const fileDir = path.resolve(__dirname, "../static"); if (!fs.existsSync(fileDir)) { fs.mkdirSync(fileDir); } // 最后写入的文件路径 const filePath = path.resolve(fileDir, filename); const pipeStreams = chunksName.map((chunkName, index) => { const chunkPath = path.resolve(chunksDir, chunkName); // 创建写入流 const writeStream = fs.createWriteStream(filePath, { start: index * size }); return createPipeStream(chunkPath, writeStream); }); await Promise.all(pipeStreams); // 全部写完,删除chunks切片目录 fs.rmdirSync(chunksDir); } // 创建管道流写入 function createPipeStream(chunkPath, writeStream) { return new Promise((resolve) => { const readStream = fs.createReadStream(chunkPath); readStream.pipe(writeStream); readStream.on("end", () => { // 写完一个chunk,就删除 fs.unlinkSync(chunkPath); resolve(); }); }); }5、秒传文件
// Middleware: if the chunk already exists on disk, short-circuit with a
// success response so the client skips re-uploading it ("秒传").
async function verifyChunks(ctx, next) {
  // BUG FIX: read the parameter by name via the parsed query object
  // instead of splitting the raw querystring, which broke as soon as a
  // second parameter was present and left the value URL-encoded.
  const chunkName = ctx.query.chunkName;
  if (chunkName) {
    const chunkPath = path.resolve(chunksDir, chunkName);
    if (fs.existsSync(chunkPath)) {
      ctx.body = { code: 200, msg: "文件已上传" };
      return;
    }
  }
  await next();
}

// Chunk upload endpoint: verify first, then parse/persist the chunk.
router.post("/upload-chunks", verifyChunks, uploadKoaBody, (ctx) => {
  ctx.body = { code: 200, msg: "文件上传成功" };
});
前端这边修改一下请求路径,带个参数过去
// Same uploader as before (body elided); only the request URL changes:
// the chunk's on-disk name is passed as a query parameter so the server
// can check whether it already exists before accepting the body.
export function concurrentChunksUpload(dataList, max = 6) {
  return new Promise((resolve) => {
    //...
    const formData = dataList[i];
    // Server stores chunks as "chunk-<fileHash>-<index>".
    const chunkName = `chunk-${formData.get("chunkHash")}`;
    const url = `${BASE_URL}/upload-chunks?chunkName=${chunkName}`;
    //...
  });
}
6、暂停上传
<script setup>
import axios from "axios";

// NOTE(review): axios CancelToken is deprecated since axios v0.22 in
// favor of AbortController — consider migrating.
const CancelToken = axios.CancelToken;
let axiosSource = CancelToken.source();

// Pause: cancel every in-flight chunk request sharing this token.
function pauseUpload() {
  axiosSource.cancel?.();
}

async function uploadChunks(existentChunks = []) {
  const hash = await calculateFileHash(fileChunks.value);
  const dataList = createFormData(fileChunks.value, hash, existentChunks);
  // Pass the cancel token so pauseUpload() can abort pending requests.
  await concurrentChunksUpload(axiosSource.token, dataList);
  // After all chunks are sent, ask the server to merge them.
  mergeChunks(originFile.value.name);
}
</script>

<template>
  <input type="file" @change="handleFileChange" />
  <button @click="uploadChunks()">上传</button>
  <button @click="pauseUpload">暂停</button>
</template>
// utils.js export function concurrentChunksUpload(sourceToken, dataList, max = 6) { return new Promise((resolve) => { //... const res = await axios.post(url, formData, { cancelToken: sourceToken, }); //... }); }7、继续上传
<script setup>
import { getExistentChunks } from "./utils";

// Resume: ask the server which chunks it already has, then upload the rest.
async function continueUpload() {
  const { data } = await getExistentChunks();
  uploadChunks(data);
}

// existentChunks defaults to an empty array (fresh upload).
async function uploadChunks(existentChunks = []) {
  const hash = await calculateFileHash(fileChunks.value);
  // existentChunks is used to filter out already-uploaded chunks.
  const dataList = createFormData(fileChunks.value, hash, existentChunks);
  // A cancelled token stays cancelled, so create a fresh source on resume.
  axiosSource = CancelToken.source();
  await concurrentChunksUpload(axiosSource.token, dataList);
  // After all chunks are sent, ask the server to merge them.
  mergeChunks(originFile.value.name);
}
</script>

<template>
  <input type="file" @change="handleFileChange" />
  <button @click="uploadChunks()">上传</button>
  <button @click="pauseUpload">暂停</button>
  <button @click="continueUpload">继续</button>
</template>
utils.js
export function createFormData(fileChunks, hash, existentChunks) { const existentChunksName = existentChunks // 如果切片有损坏,切片大小可能就不等于CHUNK_SIZE,重新传 // 最后一张切片大小大概率是不等的 .filter((item) => item.size === CHUNK_SIZE) .map((item) => item.filename); return fileChunks .map((chunk, index) => ({ fileHash: hash, chunkHash: `${hash}-${index}`, chunk, })) .filter(({ chunkHash }) => { // 同时过滤掉已经上传的切片 return !existentChunksName.includes(`chunk-${chunkHash}`); }) .map(({ fileHash, chunkHash, chunk }) => { const formData = new FormData(); formData.append("fileHash", fileHash); formData.append("chunkHash", chunkHash); formData.append(`chunk-${chunkHash}`, chunk); return formData; }); } export function getExistentChunks() { return axios.post(BASE_URL + "/existent-chunks"); }后端逻辑
// List the chunks the server already has, so the client can resume an
// interrupted upload and skip chunks that are complete on disk.
router.post("/existent-chunks", (ctx) => {
  if (!fs.existsSync(chunksDir)) {
    ctx.body = [];
    return;
  }
  ctx.body = fs.readdirSync(chunksDir).map((filename) => {
    return {
      // Chunk name, e.g. chunk-tue234wdhfjksd211tyf3234-1
      filename,
      // BUG FIX: the template string interpolated a garbled placeholder
      // instead of the chunk's filename, so statSync always failed.
      size: fs.statSync(`${chunksDir}/${filename}`).size,
    };
  });
});
最后