
The fread() callback pointer and its associated input pointer are now stored
in the connectdata struct instead, and are no longer modified within the
'set' struct as previously (which was a really BAAAD thing).
Daniel Stenberg 2002-12-09 15:37:54 +00:00
parent c65e088caf
commit 4bcc866c52
6 changed files with 241 additions and 237 deletions
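
In practice the change amounts to giving the connection its own copy of the
read callback and its input pointer, filled in once from the user's settings
when the connection is created. The sketch below is a simplified illustration
of that ownership, not the actual curl structs: the typedef is condensed, the
'in' pointer is shown as void*, and the helper init_conn() is hypothetical;
only the field names conn->fread / conn->fread_in are taken from the diff.

    /* simplified sketch: the per-connection copy can be overridden (e.g.
       pointed at the form-data reader for a POST) without touching the
       caller-owned 'set' struct */
    typedef size_t (*curl_read_callback)(char *buffer, size_t size,
                                         size_t nitems, void *instream);

    struct UserDefined {            /* what the application configured */
      curl_read_callback fread;     /* user's read callback */
      void *in;                     /* user's input handle */
    };

    struct connectdata {            /* per-connection working copy */
      curl_read_callback fread;     /* function that reads the input */
      void *fread_in;               /* pointer passed to fread() above */
    };

    /* at connection setup, cf. CreateConnection in the diff below */
    static void init_conn(struct connectdata *conn,
                          const struct UserDefined *set)
    {
      conn->fread    = set->fread;  /* copy once...                     */
      conn->fread_in = set->in;     /* ...and never write back to 'set' */
    }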


@@ -1591,8 +1591,8 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
readthisamountnow = BUFSIZE;
actuallyread =
data->set.fread(data->state.buffer, 1, readthisamountnow,
data->set.in);
conn->fread(data->state.buffer, 1, readthisamountnow,
conn->fread_in);
passed += actuallyread;
if(actuallyread != readthisamountnow) {
@@ -1623,7 +1623,7 @@ CURLcode Curl_ftp_nextconnect(struct connectdata *conn)
}
}
/* Send everything on data->set.in to the socket */
/* Send everything on data->state.in to the socket */
if(data->set.ftp_append) {
/* we append onto the file instead of rewriting it */
FTPSENDF(conn, "APPE %s", ftp->file);


@@ -126,8 +126,11 @@ send_buffer *add_buffer_init(void)
* add_buffer_send() sends a buffer and frees all associated memory.
*/
static
CURLcode add_buffer_send(int sockfd, struct connectdata *conn, send_buffer *in,
long *bytes_written)
CURLcode add_buffer_send(send_buffer *in,
int sockfd,
struct connectdata *conn,
long *bytes_written) /* add the number of sent
bytes to this counter */
{
ssize_t amount;
CURLcode res;
@@ -149,6 +152,8 @@ CURLcode add_buffer_send(int sockfd, struct connectdata *conn, send_buffer *in,
/* this data _may_ contain binary stuff */
Curl_debug(conn->data, CURLINFO_HEADER_OUT, ptr, amount);
*bytes_written += amount;
if(amount != size) {
size -= amount;
ptr += amount;
@@ -162,8 +167,6 @@ CURLcode add_buffer_send(int sockfd, struct connectdata *conn, send_buffer *in,
free(in->buffer);
free(in);
*bytes_written += amount;
return res;
}
@@ -517,8 +520,9 @@ CURLcode Curl_http_done(struct connectdata *conn)
Curl_formclean(http->sendit); /* Now free that whole lot */
data->set.fread = http->storefread; /* restore */
data->set.in = http->in; /* restore */
/* set the proper values */
conn->fread = data->set.fread; /* restore */
conn->fread_in = data->set.in; /* restore */
}
else if(HTTPREQ_PUT == data->set.httpreq)
conn->bytecount = http->readbytecount + http->writebytecount;
@@ -923,13 +927,9 @@ CURLcode Curl_http(struct connectdata *conn)
return CURLE_HTTP_POST_ERROR;
}
http->storefread = data->set.fread; /* backup */
http->in = data->set.in; /* backup */
data->set.fread = (curl_read_callback)
Curl_FormReader; /* set the read function to read from the
generated form data */
data->set.in = (FILE *)&http->form;
/* set the read function to read from the generated form data */
conn->fread = (curl_read_callback)Curl_FormReader;
conn->fread_in = &http->form;
if(!conn->bits.upload_chunky)
/* only add Content-Length if not uploading chunked */
@@ -974,7 +974,7 @@ CURLcode Curl_http(struct connectdata *conn)
Curl_pgrsSetUploadSize(data, http->postsize);
/* fire away the whole request to the server */
result = add_buffer_send(conn->firstsocket, conn, req_buffer,
result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
failf(data, "Failed sending POST request");
@@ -1004,10 +1004,10 @@ CURLcode Curl_http(struct connectdata *conn)
Curl_pgrsSetUploadSize(data, data->set.infilesize);
/* this sends the buffer and frees all the buffer resources */
result = add_buffer_send(conn->firstsocket, conn, req_buffer,
result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
failf(data, "Faied sending POST request");
failf(data, "Failed sending POST request");
else
/* prepare for transfer */
result = Curl_Transfer(conn, conn->firstsocket, -1, TRUE,
@@ -1053,7 +1053,7 @@ CURLcode Curl_http(struct connectdata *conn)
data->set.postfields );
/* issue the request */
result = add_buffer_send(conn->firstsocket, conn, req_buffer,
result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)
@@ -1070,7 +1070,7 @@ CURLcode Curl_http(struct connectdata *conn)
add_buffer(req_buffer, "\r\n", 2);
/* issue the request */
result = add_buffer_send(conn->firstsocket, conn, req_buffer,
result = add_buffer_send(req_buffer, conn->firstsocket, conn,
&data->info.request_size);
if(result)


@@ -275,7 +275,8 @@ int cert_stuff(struct connectdata *conn,
if (SSL_CTX_use_PrivateKey_file(conn->ssl.ctx,
key_file,
file_type) != 1) {
failf(data, "unable to set private key file\n");
failf(data, "unable to set private key file: '%s' type %s\n",
key_file, key_type?key_type:"PEM");
return 0;
}
break;


@@ -597,240 +597,240 @@ CURLcode Curl_readwrite(struct connectdata *conn,
else if(checkprefix("Last-Modified:", k->p) &&
(data->set.timecondition || data->set.get_filetime) ) {
time_t secs=time(NULL);
k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
&secs);
if(data->set.get_filetime)
data->info.filetime = k->timeofdoc;
}
else if ((k->httpcode >= 300 && k->httpcode < 400) &&
(data->set.http_follow_location) &&
checkprefix("Location:", k->p)) {
/* this is the URL that the server advices us to get instead */
char *ptr;
char *start=k->p;
char backup;
start += 9; /* pass "Location:" */
/* Skip spaces and tabs. We do this to support multiple
white spaces after the "Location:" keyword. */
while(*start && isspace((int)*start ))
start++;
ptr = start; /* start scanning here */
/* scan through the string to find the end */
while(*ptr && !isspace((int)*ptr))
ptr++;
backup = *ptr; /* store the ending letter */
if(ptr != start) {
*ptr = '\0'; /* zero terminate */
conn->newurl = strdup(start); /* clone string */
*ptr = backup; /* restore ending letter */
k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
&secs);
if(data->set.get_filetime)
data->info.filetime = k->timeofdoc;
}
}
else if ((k->httpcode >= 300 && k->httpcode < 400) &&
(data->set.http_follow_location) &&
checkprefix("Location:", k->p)) {
/* this is the URL that the server advices us to get instead */
char *ptr;
char *start=k->p;
char backup;
/*
* End of header-checks. Write them to the client.
*/
start += 9; /* pass "Location:" */
k->writetype = CLIENTWRITE_HEADER;
if (data->set.http_include_header)
k->writetype |= CLIENTWRITE_BODY;
/* Skip spaces and tabs. We do this to support multiple
white spaces after the "Location:" keyword. */
while(*start && isspace((int)*start ))
start++;
ptr = start; /* start scanning here */
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
k->p, k->hbuflen);
/* scan through the string to find the end */
while(*ptr && !isspace((int)*ptr))
ptr++;
backup = *ptr; /* store the ending letter */
if(ptr != start) {
*ptr = '\0'; /* zero terminate */
conn->newurl = strdup(start); /* clone string */
*ptr = backup; /* restore ending letter */
}
}
result = Curl_client_write(data, k->writetype, k->p,
k->hbuflen);
if(result)
return result;
/*
* End of header-checks. Write them to the client.
*/
data->info.header_size += k->hbuflen;
conn->headerbytecount += k->hbuflen;
k->writetype = CLIENTWRITE_HEADER;
if (data->set.http_include_header)
k->writetype |= CLIENTWRITE_BODY;
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
k->p, k->hbuflen);
result = Curl_client_write(data, k->writetype, k->p,
k->hbuflen);
if(result)
return result;
data->info.header_size += k->hbuflen;
conn->headerbytecount += k->hbuflen;
/* reset hbufp pointer && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
}
while (!stop_reading && *k->str); /* header line within buffer */
/* reset hbufp pointer && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
}
while (!stop_reading && *k->str); /* header line within buffer */
if(stop_reading)
/* We've stopped dealing with input, get out of the do-while loop */
break;
if(stop_reading)
/* We've stopped dealing with input, get out of the do-while loop */
break;
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
} /* end if header mode */
} /* end if header mode */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (k->str && !k->header && (nread > 0)) {
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (k->str && !k->header && (nread > 0)) {
if(0 == k->bodywrites) {
/* These checks are only made the first time we are about to
write a piece of the body */
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
if (conn->newurl) {
/* abort after the headers if "follow Location" is set */
infof (data, "Follow to new URL: %s\n", conn->newurl);
k->keepon &= ~KEEP_READ;
FD_ZERO(&k->rkeepfd);
*done = TRUE;
return CURLE_OK;
}
else if (conn->resume_from &&
!k->content_range &&
(data->set.httpreq==HTTPREQ_GET)) {
/* we wanted to resume a download, although the server
doesn't seem to support this and we did this with a GET
(if it wasn't a GET we did a POST or PUT resume) */
failf (data, "HTTP server doesn't seem to support "
"byte ranges. Cannot resume.");
return CURLE_HTTP_RANGE_ERROR;
}
else if(data->set.timecondition && !conn->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(k->timeofdoc < data->set.timevalue) {
infof(data,
"The requested document is not new enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(k->timeofdoc > data->set.timevalue) {
infof(data,
"The requested document is not old enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
if(0 == k->bodywrites) {
/* These checks are only made the first time we are about to
write a piece of the body */
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
if (conn->newurl) {
/* abort after the headers if "follow Location" is set */
infof (data, "Follow to new URL: %s\n", conn->newurl);
k->keepon &= ~KEEP_READ;
FD_ZERO(&k->rkeepfd);
*done = TRUE;
return CURLE_OK;
}
else if (conn->resume_from &&
!k->content_range &&
(data->set.httpreq==HTTPREQ_GET)) {
/* we wanted to resume a download, although the server
doesn't seem to support this and we did this with a GET
(if it wasn't a GET we did a POST or PUT resume) */
failf (data, "HTTP server doesn't seem to support "
"byte ranges. Cannot resume.");
return CURLE_HTTP_RANGE_ERROR;
}
else if(data->set.timecondition && !conn->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case TIMECOND_IFMODSINCE:
default:
if(k->timeofdoc < data->set.timevalue) {
infof(data,
"The requested document is not new enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
case TIMECOND_IFUNMODSINCE:
if(k->timeofdoc > data->set.timevalue) {
infof(data,
"The requested document is not old enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
} /* this is HTTP */
} /* this is the first time we write a body part */
k->bodywrites++;
} /* this is HTTP */
} /* this is the first time we write a body part */
k->bodywrites++;
/* pass data to the debug function before it gets "dechunked" */
if(data->set.verbose) {
if(k->badheader) {
Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
k->hbuflen);
if(k->badheader == HEADER_PARTHEADER)
/* pass data to the debug function before it gets "dechunked" */
if(data->set.verbose) {
if(k->badheader) {
Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
k->hbuflen);
if(k->badheader == HEADER_PARTHEADER)
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
}
else
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
}
else
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
}
if(conn->bits.chunk) {
/*
* Bless me father for I have sinned. Here comes a chunked
* transfer flying and we need to decode this properly. While
* the name says read, this function both reads and writes away
* the data. The returned 'nread' holds the number of actual
* data it wrote to the client. */
CHUNKcode res =
Curl_httpchunk_read(conn, k->str, nread, &nread);
if(conn->bits.chunk) {
/*
* Bless me father for I have sinned. Here comes a chunked
* transfer flying and we need to decode this properly. While
* the name says read, this function both reads and writes away
* the data. The returned 'nread' holds the number of actual
* data it wrote to the client. */
CHUNKcode res =
Curl_httpchunk_read(conn, k->str, nread, &nread);
if(CHUNKE_OK < res) {
if(CHUNKE_WRITE_ERROR == res) {
failf(data, "Failed writing data");
return CURLE_WRITE_ERROR;
if(CHUNKE_OK < res) {
if(CHUNKE_WRITE_ERROR == res) {
failf(data, "Failed writing data");
return CURLE_WRITE_ERROR;
}
failf(data, "Received problem in the chunky parser");
return CURLE_RECV_ERROR;
}
failf(data, "Received problem in the chunky parser");
return CURLE_RECV_ERROR;
else if(CHUNKE_STOP == res) {
/* we're done reading chunks! */
k->keepon &= ~KEEP_READ; /* read no more */
FD_ZERO(&k->rkeepfd);
/* There are now possibly N number of bytes at the end of the
str buffer that weren't written to the client, but we don't
care about them right now. */
}
/* If it returned OK, we just keep going */
}
else if(CHUNKE_STOP == res) {
/* we're done reading chunks! */
k->keepon &= ~KEEP_READ; /* read no more */
if((-1 != conn->maxdownload) &&
(k->bytecount + nread >= conn->maxdownload)) {
nread = conn->maxdownload - k->bytecount;
if(nread < 0 ) /* this should be unusual */
nread = 0;
k->keepon &= ~KEEP_READ; /* we're done reading */
FD_ZERO(&k->rkeepfd);
/* There are now possibly N number of bytes at the end of the
str buffer that weren't written to the client, but we don't
care about them right now. */
}
/* If it returned OK, we just keep going */
}
if((-1 != conn->maxdownload) &&
(k->bytecount + nread >= conn->maxdownload)) {
nread = conn->maxdownload - k->bytecount;
if(nread < 0 ) /* this should be unusual */
nread = 0;
k->bytecount += nread;
k->keepon &= ~KEEP_READ; /* we're done reading */
FD_ZERO(&k->rkeepfd);
}
k->bytecount += nread;
Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
if(!conn->bits.chunk && (nread || k->badheader)) {
/* If this is chunky transfer, it was already written */
if(!conn->bits.chunk && (nread || k->badheader)) {
/* If this is chunky transfer, it was already written */
if(k->badheader) {
/* we parsed a piece of data wrongly assuming it was a header
and now we output it as body instead */
result = Curl_client_write(data, CLIENTWRITE_BODY,
data->state.headerbuff,
k->hbuflen);
}
if(k->badheader < HEADER_ALLBAD) {
/* This switch handles various content encodings. If there's an
error here, be sure to check over the almost identical code in
http_chunk.c. 08/29/02 jhrg */
#ifdef HAVE_LIBZ
switch (k->content_encoding) {
case IDENTITY:
#endif
/* This is the default when the server sends no
Content-Encoding header. See Curl_readwrite_init; the
memset() call initializes k->content_encoding to zero.
08/28/02 jhrg */
result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
nread);
#ifdef HAVE_LIBZ
break;
case DEFLATE:
/* Assume CLIENTWRITE_BODY; headers are not encoded. */
result = Curl_unencode_deflate_write(data, k, nread);
break;
case GZIP: /* FIXME 08/27/02 jhrg */
case COMPRESS:
default:
failf (data, "Unrecognized content encoding type. "
"libcurl understands `identity' and `deflate' "
"content encodings.");
result = CURLE_BAD_CONTENT_ENCODING;
break;
if(k->badheader) {
/* we parsed a piece of data wrongly assuming it was a header
and now we output it as body instead */
result = Curl_client_write(data, CLIENTWRITE_BODY,
data->state.headerbuff,
k->hbuflen);
}
if(k->badheader < HEADER_ALLBAD) {
/* This switch handles various content encodings. If there's an
error here, be sure to check over the almost identical code
in http_chunk.c. 08/29/02 jhrg */
#ifdef HAVE_LIBZ
switch (k->content_encoding) {
case IDENTITY:
#endif
/* This is the default when the server sends no
Content-Encoding header. See Curl_readwrite_init; the
memset() call initializes k->content_encoding to zero.
08/28/02 jhrg */
result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
nread);
#ifdef HAVE_LIBZ
break;
case DEFLATE:
/* Assume CLIENTWRITE_BODY; headers are not encoded. */
result = Curl_unencode_deflate_write(data, k, nread);
break;
case GZIP: /* FIXME 08/27/02 jhrg */
case COMPRESS:
default:
failf (data, "Unrecognized content encoding type. "
"libcurl understands `identity' and `deflate' "
"content encodings.");
result = CURLE_BAD_CONTENT_ENCODING;
break;
}
#endif
}
k->badheader = HEADER_NORMAL; /* taken care of now */
if(result)
return result;
}
k->badheader = HEADER_NORMAL; /* taken care of now */
if(result)
return result;
}
} /* if (! header and data to read ) */
} /* if (! header and data to read ) */
} while(!readdone);
@@ -874,8 +874,8 @@ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
conn->upload_fromhere += 10; /* 32bit hex + CRLF */
}
nread = data->set.fread(conn->upload_fromhere, 1,
buffersize, data->set.in);
nread = conn->fread(conn->upload_fromhere, 1,
buffersize, conn->fread_in);
if(conn->bits.upload_chunky) {
/* if chunked Transfer-Encoding */
@@ -944,12 +944,12 @@ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
that instead of reading more data */
}
/* write to socket */
/* write to socket (send away data) */
result = Curl_write(conn,
conn->writesockfd,
conn->upload_fromhere,
conn->upload_present,
&bytes_written);
conn->writesockfd, /* socket to send to */
conn->upload_fromhere, /* buffer pointer */
conn->upload_present, /* buffer size */
&bytes_written); /* actually send away */
if(result)
return result;
else if(conn->upload_present != bytes_written) {


@@ -1789,6 +1789,9 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* else, no chunky upload */
FALSE;
conn->fread = data->set.fread;
conn->fread_in = data->set.in;
/***********************************************************
* We need to allocate memory to store the path in. We get the size of the
* full URL to be sure, and we need to make it at least 256 bytes since


@@ -164,9 +164,6 @@ struct HTTP {
/* For FORM posting */
struct Form form;
curl_read_callback storefread;
FILE *in;
struct Curl_chunker chunk;
};
@@ -458,6 +455,9 @@ struct connectdata {
and the 'upload_present' contains the number of bytes available at this
position */
char *upload_fromhere;
curl_read_callback fread; /* function that reads the input */
void *fread_in; /* pointer to pass to the fread() above */
};
/* The end of connectdata. 08/27/02 jhrg */
@@ -633,7 +633,7 @@ struct UserDefined {
bool free_referer; /* set TRUE if 'referer' points to a string we
allocated */
char *useragent; /* User-Agent string */
char *encoding; /* Accept-Encoding string 08/28/02 jhrg */
char *encoding; /* Accept-Encoding string */
char *postfields; /* if POST, set the fields' values here */
size_t postfieldsize; /* if POST, this might have a size to use instead of
strlen(), and then the data *may* be binary (contain