1. Use Cases for Chunked Upload
Faster uploads of large files: when a file is larger than 100 MB, chunked upload lets you upload multiple parts in parallel, which speeds up the transfer.
Poor network conditions: chunked upload is recommended when the network is unreliable; if an upload fails, you only need to re-upload the failed part.
Unknown file size: the upload can start before the final size of the file is known, a scenario that is common in video surveillance and similar industries.
2. How It Works
The idea is simple: the client splits the large file into chunks according to a fixed rule, for example 20 MB per chunk, and uploads each chunk to the server on its own. Once every chunk has been uploaded, the client asks the server to merge them back into the original file; when the merge finishes, the upload is complete.
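As a rough sketch of the split rule (the 20 MB part size matches the test later in this article; the 250 MB file size here is just an assumed example), the number of chunks is the file size divided by the part size, rounded up:

public class ChunkMath {
    public static void main(String[] args) {
        long partSize = 20L * 1024 * 1024;        // 20 MB per chunk, as in the test below
        long contentLength = 250L * 1024 * 1024;  // assumed file size of 250 MB, for illustration only
        // Round up so the smaller trailing chunk is counted as well.
        long chunkCount = (contentLength + partSize - 1) / partSize;
        System.out.println("chunks to upload: " + chunkCount); // prints 13
    }
}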
3. Project Code
Objective
Implement chunked upload of large files.
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>springboot-demo</artifactId>
        <groupId>com.et</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>file</artifactId>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-core</artifactId>
            <version>5.8.15</version>
        </dependency>
    </dependencies>
</project>
controller
package com.et.controller;

import com.et.bean.Chunk;
import com.et.bean.FileInfo;
import com.et.service.ChunkService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;

import java.util.List;

@RestController
@RequestMapping("file")
public class ChunkController {

    @Autowired
    private ChunkService chunkService;

    /**
     * upload by part
     *
     * @param chunk
     * @return
     */
    @PostMapping(value = "chunk")
    public ResponseEntity<String> chunk(Chunk chunk) {
        chunkService.chunk(chunk);
        return ResponseEntity.ok("file chunk upload success");
    }

    /**
     * merge
     *
     * @param filename
     * @return
     */
    @GetMapping(value = "merge")
    public ResponseEntity<Void> merge(@RequestParam("filename") String filename) {
        chunkService.merge(filename);
        return ResponseEntity.ok().build();
    }

    /**
     * get filename
     *
     * @return files
     */
    @GetMapping("/files")
    public ResponseEntity<List<FileInfo>> list() {
        return ResponseEntity.ok(chunkService.list());
    }

    /**
     * get single file
     *
     * @param filename
     * @return file
     */
    @GetMapping("/files/{filename:.+}")
    public ResponseEntity<Resource> getFile(@PathVariable("filename") String filename) {
        return ResponseEntity.ok()
                .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"")
                .body(chunkService.getFile(filename));
    }
}
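The bean classes under com.et.bean (Chunk, ChunkProcess, FileInfo) are not listed in this article; they are in the code repository. Purely as a sketch, a Chunk bean that is consistent with the getters used in the service and the form fields posted by the test class below could look like this (the exact field types and the use of Lombok's chained accessors are assumptions):

package com.et.bean;

import lombok.Data;
import lombok.experimental.Accessors;
import org.springframework.web.multipart.MultipartFile;

// Sketch only: field list inferred from the controller, service, and test code in this article.
@Data
@Accessors(chain = true)
public class Chunk {
    private String filename;        // original file name
    private Integer chunkNumber;    // index of this chunk, starting at 1
    private Long chunkSize;         // nominal part size (20 MB in the test)
    private Long currentChunkSize;  // actual size of this chunk
    private Long totalSize;         // size of the complete file
    private Integer totalChunks;    // total number of chunks
    private MultipartFile file;     // the chunk payload itself
}

FileInfo (filename plus upload time) and ChunkProcess (upload id plus the list of uploaded parts) follow the same chained-setter pattern.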
config
package com.et.config;

import com.et.service.FileClient;
import com.et.service.impl.LocalFileSystemClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

@Configuration
public class FileClientConfig {

    @Value("${file.client.type:local-file}")
    private String fileClientType;

    private static final Map<String, Supplier<FileClient>> FILE_CLIENT_SUPPLY = new HashMap<String, Supplier<FileClient>>() {
        {
            put("local-file", LocalFileSystemClient::new);
            // put("aws-s3", AwsFileClient::new);
        }
    };

    /**
     * get client
     *
     * @return
     */
    @Bean
    public FileClient fileClient() {
        return FILE_CLIENT_SUPPLY.get(fileClientType).get();
    }
}
service
package com.et.service;

import com.et.bean.Chunk;
import com.et.bean.FileInfo;
import org.springframework.core.io.Resource;

import java.util.List;

public interface ChunkService {

    void chunk(Chunk chunk);

    void merge(String filename);

    List<FileInfo> list();

    Resource getFile(String filename);
}

package com.et.service.impl;

import com.et.bean.Chunk;
import com.et.bean.ChunkProcess;
import com.et.bean.FileInfo;
import com.et.service.ChunkService;
import com.et.service.FileClient;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;

@Service
@Slf4j
public class ChunkServiceImpl implements ChunkService {

    // upload progress per file
    private static final Map<String, ChunkProcess> CHUNK_PROCESS_STORAGE = new ConcurrentHashMap<>();
    // uploaded file list
    private static final List<FileInfo> FILE_STORAGE = new CopyOnWriteArrayList<>();

    @Autowired
    private FileClient fileClient;

    @Override
    public void chunk(Chunk chunk) {
        String filename = chunk.getFilename();
        boolean match = FILE_STORAGE.stream().anyMatch(fileInfo -> fileInfo.getFilename().equals(filename));
        if (match) {
            throw new RuntimeException("file [ " + filename + " ] already exist");
        }

        ChunkProcess chunkProcess;
        String uploadId;
        if (CHUNK_PROCESS_STORAGE.containsKey(filename)) {
            chunkProcess = CHUNK_PROCESS_STORAGE.get(filename);
            uploadId = chunkProcess.getUploadId();
            AtomicBoolean isUploaded = new AtomicBoolean(false);
            Optional.ofNullable(chunkProcess.getChunkList())
                    .ifPresent(chunkPartList -> isUploaded.set(chunkPartList.stream()
                            .anyMatch(chunkPart -> chunkPart.getChunkNumber() == chunk.getChunkNumber())));
            if (isUploaded.get()) {
                log.info("file [{}] chunk [{}] already uploaded, skip", chunk.getFilename(), chunk.getChunkNumber());
                return;
            }
        } else {
            uploadId = fileClient.initTask(filename);
            chunkProcess = new ChunkProcess().setFilename(filename).setUploadId(uploadId);
            CHUNK_PROCESS_STORAGE.put(filename, chunkProcess);
        }

        List<ChunkProcess.ChunkPart> chunkList = chunkProcess.getChunkList();
        String chunkId = fileClient.chunk(chunk, uploadId);
        chunkList.add(new ChunkProcess.ChunkPart(chunkId, chunk.getChunkNumber()));
        CHUNK_PROCESS_STORAGE.put(filename, chunkProcess.setChunkList(chunkList));
    }

    @Override
    public void merge(String filename) {
        ChunkProcess chunkProcess = CHUNK_PROCESS_STORAGE.get(filename);
        fileClient.merge(chunkProcess);
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        String currentTime = simpleDateFormat.format(new Date());
        FILE_STORAGE.add(new FileInfo().setUploadTime(currentTime).setFilename(filename));
        CHUNK_PROCESS_STORAGE.remove(filename);
    }

    @Override
    public List<FileInfo> list() {
        return FILE_STORAGE;
    }

    @Override
    public Resource getFile(String filename) {
        return fileClient.getFile(filename);
    }
}

package com.et.service.impl;

import com.et.bean.FileInfo;
import com.et.service.FileUploadService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;
import org.springframework.util.FileCopyUtils;
import org.springframework.web.multipart.MultipartFile;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

@Service
@Slf4j
public class FileUploadServiceImpl implements FileUploadService {

    @Value("${upload.path:/data/upload/}")
    private String filePath;

    private static final List<FileInfo> FILE_STORAGE = new CopyOnWriteArrayList<>();

    @Override
    public void upload(MultipartFile[] files) {
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        for (MultipartFile file : files) {
            String filename = file.getOriginalFilename();
            boolean match = FILE_STORAGE.stream().anyMatch(fileInfo -> fileInfo.getFilename().equals(filename));
            if (match) {
                throw new RuntimeException("file [ " + filename + " ] already exist");
            }
            String currentTime = simpleDateFormat.format(new Date());
            try (InputStream in = file.getInputStream();
                 OutputStream out = Files.newOutputStream(Paths.get(filePath + filename))) {
                FileCopyUtils.copy(in, out);
            } catch (IOException e) {
                log.error("file [{}] upload failed", filename, e);
                throw new RuntimeException(e);
            }
            FileInfo fileInfo = new FileInfo().setFilename(filename).setUploadTime(currentTime);
            FILE_STORAGE.add(fileInfo);
        }
    }

    @Override
    public List<FileInfo> list() {
        return FILE_STORAGE;
    }

    @Override
    public Resource getFile(String filename) {
        FILE_STORAGE.stream()
                .filter(info -> info.getFilename().equals(filename))
                .findFirst()
                .orElseThrow(() -> new RuntimeException("file [ " + filename + " ] not exist"));
        File file = new File(filePath + filename);
        return new FileSystemResource(file);
    }
}
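The FileClient abstraction and its LocalFileSystemClient implementation are likewise only in the repository. Judging from the calls made in ChunkServiceImpl above, the interface is roughly the following; treat this as a sketch, since the exact signatures are an assumption:

package com.et.service;

import com.et.bean.Chunk;
import com.et.bean.ChunkProcess;
import org.springframework.core.io.Resource;

/**
 * Storage-backend abstraction used by ChunkServiceImpl.
 * Sketched from the calls made in the service above; the real
 * interface is in the code repository.
 */
public interface FileClient {

    /** Start a multipart upload for the given file and return an upload id. */
    String initTask(String filename);

    /** Persist one chunk under the given upload id and return a chunk id. */
    String chunk(Chunk chunk, String uploadId);

    /** Merge all uploaded chunks of this upload into the final file. */
    void merge(ChunkProcess chunkProcess);

    /** Return the merged file as a Spring Resource for download. */
    Resource getFile(String filename);
}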
The snippets above are only the key pieces of code; the complete source is in the repository below.
Code repository
4. Testing
Start the Spring Boot application.
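The Spring Boot entry class is not shown in this article; a typical one (the class and package names here are assumptions, the real class is in the repository) is simply:

package com.et;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Assumed entry class; the actual one lives in the code repository.
@SpringBootApplication
public class DemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}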
Write a test class
@Test
public void testUpload() throws Exception {
    String chunkFileFolder = "D:/tmp/";
    java.io.File file = new java.io.File("D:/software/oss-browser-win32-ia32.zip");
    long contentLength = file.length();
    // partSize: 20 MB
    long partSize = 20 * 1024 * 1024;
    // the last part may be smaller than 20 MB
    long chunkFileNum = (long) Math.ceil(contentLength * 1.0 / partSize);
    RestTemplate restTemplate = new RestTemplate();
    try (RandomAccessFile raf_read = new RandomAccessFile(file, "r")) {
        // buffer
        byte[] b = new byte[1024];
        for (int i = 1; i <= chunkFileNum; i++) {
            // chunk file
            java.io.File chunkFile = new java.io.File(chunkFileFolder + i);
            // write this chunk
            try (RandomAccessFile raf_write = new RandomAccessFile(chunkFile, "rw")) {
                int len;
                while ((len = raf_read.read(b)) != -1) {
                    raf_write.write(b, 0, len);
                    if (chunkFile.length() >= partSize) {
                        break;
                    }
                }
                // upload this chunk
                MultiValueMap<String, Object> body = new LinkedMultiValueMap<>();
                body.add("file", new FileSystemResource(chunkFile));
                body.add("chunkNumber", i);
                body.add("chunkSize", partSize);
                body.add("currentChunkSize", chunkFile.length());
                body.add("totalSize", contentLength);
                body.add("filename", file.getName());
                body.add("totalChunks", chunkFileNum);

                HttpHeaders headers = new HttpHeaders();
                headers.setContentType(MediaType.MULTIPART_FORM_DATA);
                HttpEntity<MultiValueMap<String, Object>> requestEntity = new HttpEntity<>(body, headers);
                String serverUrl = "http://localhost:8080/file/chunk";
                ResponseEntity<String> response = restTemplate.postForEntity(serverUrl, requestEntity, String.class);
                System.out.println("response code: " + response.getStatusCode() + " response body: " + response.getBody());
            } finally {
                FileUtil.del(chunkFile);
            }
        }
    }
    // merge file
    String mergeUrl = "http://localhost:8080/file/merge?filename=" + file.getName();
    ResponseEntity<String> response = restTemplate.getForEntity(mergeUrl, String.class);
    System.out.println("response code: " + response.getStatusCode() + " response body: " + response.getBody());
}
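After the merge call returns, the controller's listing and download endpoints can be used to check the result. The fragment below is only a sketch, meant to be appended to the end of the test method above (it reuses its restTemplate, file, and contentLength variables); it lists the uploaded files and downloads the merged file:

// List the uploaded files recorded by the server.
String listUrl = "http://localhost:8080/file/files";
System.out.println(restTemplate.getForObject(listUrl, String.class));

// Download the merged file and print its size to compare with the original.
String downloadUrl = "http://localhost:8080/file/files/" + file.getName();
byte[] merged = restTemplate.getForObject(downloadUrl, byte[].class);
System.out.println("merged size: " + (merged == null ? 0 : merged.length) + ", original size: " + contentLength);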
Run the test class and check the log output to confirm that every chunk uploads successfully and the final merge completes.
That is the full walkthrough of implementing chunked upload of large files with Spring Boot. For more material on chunked uploads with Spring Boot, see the other related articles on 代码网.