- 这里记录一下,Java对大文件的切分,和后端接口分片上传的实现逻辑
正常,前后端分离的项目其实是前端去切分文件,后端接口接收到切分后的分片文件去合并,这里都用java来记录一下。 - 特别说明:我这里用的是zip包的上传,里面是音频文件,如果你的文件是单个文件,切分和合并文件逻辑都是一样的,只是不用后续的解压。
- 因为是测试代码,所以部分代码规范不严谨
1.文件的切分
/**
 * Splits a large file ("55.zip") into fixed-size 10 MB chunk files named
 * "0.zip", "1.zip", ... in the same directory.
 *
 * Fixes over the original: the RandomAccessFile is now closed via
 * try-with-resources (it previously leaked on every path), and readFully()
 * replaces read(), whose return value was ignored even though a single
 * read() is not guaranteed to fill the buffer.
 */
public static void chunkFile(){
    // Size of each chunk: 10 MB.
    final int TENMB = 10485760;
    // Directory holding the big source file and the generated chunk files.
    final String PATH = "D:\\test\\fenpian\\";
    File file = new File(PATH, "55.zip");
    try (RandomAccessFile accessFile = new RandomAccessFile(file, "r")) {
        // Total size of the source file in bytes.
        long size = FileUtil.size(file);
        // Number of chunks, rounding up so the final (possibly short) piece is included.
        int chunkCount = (int) Math.ceil((double) size / TENMB);
        for (int i = 0; i < chunkCount; i++) {
            // Current read offset inside the source file.
            long filePointer = accessFile.getFilePointer();
            // Last chunk holds whatever remains; all others are exactly TENMB bytes.
            int len = (i == chunkCount - 1) ? (int) (size - filePointer) : TENMB;
            byte[] bytes = new byte[len];
            // readFully guarantees the whole buffer is populated or an EOFException is thrown.
            accessFile.readFully(bytes);
            FileUtil.writeBytes(bytes, new File(PATH, i + ".zip"));
        }
    } catch (IOException e) {
        // FileNotFoundException is a subclass of IOException, so one catch covers both.
        e.printStackTrace();
    }
}
2.Spring boot分片上传接口
controller层
// Service handling both chunked and single-shot uploads (see FileUploadServiceImpl below).
@Resource
private FileUploadService fileUploadService;
/**
 * Upload endpoint. Receives one chunk per request for chunked uploads
 * (or the whole file for single-shot uploads) wrapped in MultipartFileParam.
 *
 * @param fileParam chunk/file metadata plus the multipart payload
 * @return the service result string, or "error" when any exception occurs
 * @throws IOException declared but never propagated — the catch below swallows everything
 */
@RequestMapping(value = "/upload")
public String upload(MultipartFileParam fileParam) throws IOException {
try{
return fileUploadService.fileUpload(fileParam);
}catch (Exception e){
// NOTE(review): printStackTrace hides the failure from the application log and the
// client only sees "error" with no cause — prefer a logger and a structured response.
e.printStackTrace();
return "error";
}
}
service层
/**
 * Service contract for file uploads: implementations accept either a single
 * complete file or one chunk of a chunked upload per call.
 */
public interface FileUploadService {
/**
 * Processes one upload request (a whole file or a single chunk).
 *
 * @param fileParam chunk/file metadata plus the multipart payload
 * @return a result string, e.g. "success"
 * @throws IOException on file-system failures while persisting the data
 */
String fileUpload(MultipartFileParam fileParam) throws IOException;
}
service实现层
@Slf4j
@Service
public class FileUploadServiceImpl implements FileUploadService {
//合并后的文件的父目录
private String FILE_UPLOAD_DIR = "D:\\test\\fenpian";
//分片文件大小
private Integer CHUNK_SIZE = 10485760;
/**
* 分片上传
* @param fileParam
* @return
* @throws IOException
*/
private String chunkUpload(MultipartFileParam fileParam) throws IOException {
// 是否为最后一片
boolean lastFlag = false;
int currentChunk = fileParam.getChunk();
int totalChunk = fileParam.getTotalChunk();
long totalSize = fileParam.getTotalSize();
String fileName = fileParam.getName();
String fileMd5 = fileParam.getMd5();
MultipartFile multipartFile = fileParam.getFile();
String parentDir = FILE_UPLOAD_DIR + File.separator + fileMd5 + File.separator;
String tempFileName = fileName + "_tmp";
// 写入到临时文件
File tmpFile = tmpFile(parentDir, tempFileName, multipartFile, currentChunk, totalSize, fileMd5);
// 检测是否为最后一个分片(这里吧每个分片数据放到了一张表里,后续可以改为用redis记录)
FileChunkRecordExample example = new FileChunkRecordExample();
example.createCriteria().andMd5EqualTo(fileMd5);
long count = fileChunkRecordMapper.countByExample(example);
if (count == totalChunk) {
lastFlag = true;
}
if (lastFlag) {
// 检查md5是否一致
log.info("是否最后一个分片:{}","是");
if (!checkFileMd5(tmpFile, fileMd5)) {
cleanUp(tmpFile, fileMd5);
throw new RuntimeException("文件md5检测不符合要求, 请检查!");
}
System.out.println("开始重命名....");
File newFile = renameFile(tmpFile, fileName);
//解析文件数据 -解压缩unzip
System.out.println("开始解压缩....");
File zipFile = ZipUtil.unzip(newFile);
//得到压缩包内所有文件
System.out.println("遍历zipFile.....");
File[] files = zipFile.listFiles();
System.out.println("打印fileName.....");
//解析文件,处理业务数据
for (File file : files) {
System.out.println(file.getName());
}
log.info("所有文件上传完成, 时间是:{}, 文件名称是:{}", DateUtil.now(), fileName);
//所有数据都处理完成后,删除文件和数据库记录
cleanUp(new File(parentDir + fileName),fileMd5);
}else{
log.info("是否最后一个分片:{}","否");
}
return "success";
}
private File tmpFile(String parentDir, String tempFileName, MultipartFile file,
int currentChunk, long totalSize, String fileMd5) throws IOException {
log.info("开始上传文件, 时间是:{}, 文件名称是:{}", DateUtil.now(), tempFileName);
long position = (currentChunk - 1) * CHUNK_SIZE;
File tmpDir = new File(parentDir);
File tmpFile = new File(parentDir, tempFileName);
if (!tmpDir.exists()) {
tmpDir.mkdirs();
}
RandomAccessFile tempRaf = new RandomAccessFile(tmpFile, "rw");
if (tempRaf.length() == 0) {
tempRaf.setLength(totalSize);
}
// 写入该分片数据
FileChannel fc = tempRaf.getChannel();
MappedByteBuffer map = fc.map(FileChannel.MapMode.READ_WRITE, position, file.getSize());
map.put(file.getBytes());
clean(map);
fc.close();
tempRaf.close();
// 记录已经完成的分片
FileChunkRecord fileChunkRecord = new FileChunkRecord();
fileChunkRecord.setMd5(fileMd5);
fileChunkRecord.setUploadStatus(1);
fileChunkRecord.setChunk(currentChunk);
fileChunkRecordMapper.insert(fileChunkRecord);
log.info("分片文件上传完成, 时间是:{}, 文件名称是:{}", DateUtil.now(), tempFileName);
return tmpFile;
}
private void cleanUp(File file, String md5) {
if (file.exists()) {
file.delete();
}
// 删除上传记录
FileChunkRecordExample example = new FileChunkRecordExample();
example.createCriteria().andMd5EqualTo(md5);
fileChunkRecordMapper.deleteByExample(example);
}
/**
* 最后一片接受完后执行
* @param toBeRenamed
* @param toFileNewName
* @return
*/
private File renameFile(File toBeRenamed, String toFileNewName) {
// 检查要重命名的文件是否存在,是否是文件
if (!toBeRenamed.exists() || toBeRenamed.isDirectory()) {
log.info("File does not exist: " + toBeRenamed.getName());
throw new RuntimeException("File does not exist");
}
String parentPath = toBeRenamed.getParent();
File newFile = new File(parentPath + File.separatorChar + toFileNewName);
// 如果存在, 先删除
if (newFile.exists()) {
newFile.delete();
}
toBeRenamed.renameTo(newFile);
return newFile;
}
private static void clean(MappedByteBuffer map) {
try {
Method getCleanerMethod = map.getClass().getMethod("cleaner");
Cleaner.create(map, null);
getCleanerMethod.setAccessible(true);
Cleaner cleaner = (Cleaner) getCleanerMethod.invoke(map);
cleaner.clean();
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
e.printStackTrace();
}
}
/**
* 文件md5值检查,最后一片文件合并后执行,
* @param file 所有分片文件合并后的文件(正常情况下md5应该和前端传过来的大文件的md5一致)
* @param fileMd5 大文件的md5值
* @return
* @throws IOException
*/
private boolean checkFileMd5(File file, String fileMd5) throws IOException {
FileInputStream fis = new FileInputStream(file);
String checkMd5 = DigestUtils.md5DigestAsHex(fis).toUpperCase();
fis.close();
if (checkMd5.equals(fileMd5.toUpperCase())) {
return true;
}
return false;
}
/**
* 不分片
* @param fileParam
* @return
*/
private String singleUpload(MultipartFileParam fileParam) {
MultipartFile file = fileParam.getFile();
File baseFile = new File(FILE_UPLOAD_DIR);
if (!baseFile.exists()) {
baseFile.mkdirs();
}
try {
file.transferTo(new File(baseFile, fileParam.getName()));
Date now = new Date();
FileRecord fileRecord = new FileRecord();
String filePath = FILE_UPLOAD_DIR + File.separator + fileParam.getName();
long size = FileUtil.size(new File(filePath));
String sizeStr = size / (1024 * 1024) + "Mb";
fileRecord.setFileName(fileParam.getName()).setFilePath(filePath).setUploadStatus(1)
.setFileMd5(fileParam.getMd5()).setCreateTime(now).setUpdateTime(now).setFileSize(sizeStr);
//fileRecordMapper.insert(fileRecord);
} catch (IOException e) {
log.error("单独上传文件错误, 问题是:{}, 时间是:{}", e.getMessage(), DateUtil.now());
}
return "success";
}
}
后端入参实体类
package com.server.controller.bigFileUpload;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
import org.springframework.web.multipart.MultipartFile;
/**
* @description:
* @date: created in 2021/10/6
* @modified:
*/
@Getter
@Setter
@ToString
@Accessors(chain = true)
public class MultipartFileParam {
/**
 * Whether this request belongs to a chunked upload
 * (true: one chunk per request; false: single-shot upload).
 */
private boolean chunkFlag;
/**
 * 1-based index of the current chunk.
 */
private int chunk;
/**
 * Total number of chunks for the whole file.
 */
private int totalChunk;
/**
 * Total size of the complete file, in bytes.
 */
private long totalSize;
/**
 * Original file name.
 */
private String name;
/**
 * The multipart payload: one chunk's bytes (or the whole file when not chunked).
 */
private MultipartFile file;
/**
 * MD5 digest of the complete file, used to group chunks and verify the merge.
 */
private String md5;
}
合并后的文件放到了以文件md5值命名的文件夹内