Feng Tu made (lib)curl support "upload" resuming work for file:// URLs.

Daniel Stenberg 2007-05-31 08:59:44 +00:00
parent 14b9b823e6
commit 713c9f8602
3 changed files with 40 additions and 2 deletions
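
Since the commit message is terse, here is an orientation note: the change lets an application resume an "upload" (really a local copy) to a file:// URL. The sketch below is illustrative only and not part of this commit; the paths are invented, and setting CURLOPT_RESUME_FROM_LARGE to -1 is the programmatic counterpart of curl's "-C -", which the new code maps to "append to the target and skip whatever it already holds".

/* Illustrative sketch, not part of this commit: resume a copy to a
 * file:// target with the libcurl easy interface. Paths are made up. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl;
  CURLcode res = CURLE_FAILED_INIT;
  FILE *src = fopen("/tmp/source.dat", "rb");  /* hypothetical source file */

  if(!src)
    return 1;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "file:///tmp/target.dat");
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
    /* the default read callback fread()s from the CURLOPT_READDATA FILE* */
    curl_easy_setopt(curl, CURLOPT_READDATA, src);
    /* -1 = resume at the target's current size, i.e. what "-C -" asks for */
    curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)-1);

    res = curl_easy_perform(curl);
    if(res != CURLE_OK)
      fprintf(stderr, "copy failed: %s\n", curl_easy_strerror(res));

    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  fclose(src);
  return (int)res;
}

On the command line the same request is roughly: curl -C - -T /tmp/source.dat file:///tmp/target.dat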


@@ -5,6 +5,10 @@
\___|\___/|_| \_\_____|
Changelog
Daniel S (31 May 2007)
- Feng Tu made (lib)curl support "upload" resuming work for file:// URLs.
Daniel S (30 May 2007)
- Added CURLMOPT_MAXCONNECTS which is a curl_multi_setopt() option for setting
  the maximum size of the connection cache of the multi handle.


@@ -18,6 +18,7 @@ This release includes the following changes:
o more than one test harness can run at the same time without conflict
o SFTP now supports quote commands before a transfer
o CURLMOPT_MAXCONNECTS added to curl_multi_setopt()
o upload resume works for file:// URLs
This release includes the following bugfixes:
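
The lib/file.c hunks below do the actual work in file_upload(): open the target in append mode when a resume offset is set, translate a negative offset (curl's "-C -") into the target's current size via stat(), and then discard that many source bytes before writing. As a reading aid only, here is a standalone paraphrase of that skip-and-append bookkeeping; copy_with_resume() and the paths are invented names, not libcurl code:

/* Reading aid only: a standalone paraphrase of the skip-and-append logic
 * in the hunk below; copy_with_resume() and the paths are invented. */
#include <stdio.h>

/* Copy src to dst, dropping the first resume_from source bytes and
 * appending the rest -- the per-buffer bookkeeping file_upload() adds. */
static int copy_with_resume(FILE *src, FILE *dst, long resume_from)
{
  char buf[16384];
  size_t nread;

  while((nread = fread(buf, 1, sizeof(buf), src)) > 0) {
    char *out = buf;

    if(resume_from > 0) {
      if((long)nread <= resume_from) {
        resume_from -= (long)nread; /* whole buffer precedes the resume point */
        continue;
      }
      out = buf + resume_from;      /* write only the tail of this buffer */
      nread -= (size_t)resume_from;
      resume_from = 0;
    }
    if(fwrite(out, 1, nread, dst) != nread)
      return -1;                    /* short write */
  }
  return 0;
}

int main(void)
{
  FILE *src = fopen("/tmp/source.dat", "rb");  /* hypothetical paths */
  FILE *dst = fopen("/tmp/target.dat", "ab");
  int rc = (src && dst) ? copy_with_resume(src, dst, 1024L) : -1;

  if(src) fclose(src);
  if(dst) fclose(dst);
  return rc ? 1 : 0;
}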


@@ -198,6 +198,8 @@ static CURLcode file_upload(struct connectdata *conn)
size_t nwrite;
curl_off_t bytecount = 0;
struct timeval now = Curl_tvnow();
struct_stat file_stat;
char* buf2;
/*
* Since FILE: doesn't do the full init, we need to provide some extra
@@ -213,7 +215,11 @@ static CURLcode file_upload(struct connectdata *conn)
if(!dir[1])
return CURLE_FILE_COULDNT_READ_FILE; /* fix: better error code */
if(data->reqdata.resume_from)
fp = fopen( file->path, "ab" );
else
fp = fopen(file->path, "wb");
if(!fp) {
failf(data, "Can't open %s for writing", file->path);
return CURLE_WRITE_ERROR;
@@ -223,6 +229,17 @@ static CURLcode file_upload(struct connectdata *conn)
/* known size of data to "upload" */
Curl_pgrsSetUploadSize(data, data->set.infilesize);
/* treat the negative resume offset value as the case of "-" */
if(data->reqdata.resume_from < 0){
if(stat(file->path, &file_stat)){
fclose(fp);
failf(data, "Can't get the size of %s", file->path);
return CURLE_WRITE_ERROR;
}
else
data->reqdata.resume_from = (curl_off_t)file_stat.st_size;
}
while (res == CURLE_OK) {
int readcount;
res = Curl_fillreadbuffer(conn, BUFSIZE, &readcount);
@@ -234,8 +251,24 @@ static CURLcode file_upload(struct connectdata *conn)
nread = (size_t)readcount;
/*skip bytes before resume point*/
if(data->reqdata.resume_from) {
if( nread <= data->reqdata.resume_from ) {
data->reqdata.resume_from -= nread;
nread = 0;
buf2 = buf;
}
else {
buf2 = buf + data->reqdata.resume_from;
nread -= data->reqdata.resume_from;
data->reqdata.resume_from = 0;
}
}
else
buf2 = buf;
/* write the data to the target */
nwrite = fwrite(buf2, 1, nread, fp);
if(nwrite != nread) {
res = CURLE_SEND_ERROR;
break;