#include "quakedef.h"
#include "fs.h"
#include "libcurl.h"
+#include "thread.h"
-static cvar_t cl_curl_maxdownloads = {1, "cl_curl_maxdownloads","1", "maximum number of concurrent HTTP/FTP downloads"};
-static cvar_t cl_curl_maxspeed = {1, "cl_curl_maxspeed","100", "maximum download speed (KiB/s)"};
-static cvar_t sv_curl_defaulturl = {1, "sv_curl_defaulturl","", "default autodownload source URL"};
-static cvar_t cl_curl_enabled = {1, "cl_curl_enabled","0", "whether client's download support is enabled"};
+#include "image.h"
+#include "jpeg.h"
+#include "image_png.h"
+
+static cvar_t cl_curl_maxdownloads = {CVAR_SAVE, "cl_curl_maxdownloads","1", "maximum number of concurrent HTTP/FTP downloads"};
+static cvar_t cl_curl_maxspeed = {CVAR_SAVE, "cl_curl_maxspeed","300", "maximum download speed (KiB/s)"};
+static cvar_t sv_curl_defaulturl = {CVAR_SAVE, "sv_curl_defaulturl","", "default autodownload source URL"};
+static cvar_t sv_curl_serverpackages = {CVAR_SAVE, "sv_curl_serverpackages","", "list of required files for the clients, separated by spaces"};
+static cvar_t sv_curl_maxspeed = {CVAR_SAVE, "sv_curl_maxspeed","0", "maximum download speed for clients downloading from sv_curl_defaulturl (KiB/s)"};
+static cvar_t cl_curl_enabled = {CVAR_SAVE, "cl_curl_enabled","1", "whether client's download support is enabled"};
+static cvar_t cl_curl_useragent = {0, "cl_curl_useragent","1", "send the User-Agent string (note: turning this off may break stuff)"};
+static cvar_t cl_curl_useragent_append = {0, "cl_curl_useragent_append","", "a string to append to the User-Agent string (useful for name and version number of your mod)"};
/*
=================================================================
typedef struct CURL_s CURL;
typedef struct CURLM_s CURLM;
+typedef struct curl_slist curl_slist;
typedef enum
{
CURLE_OK = 0
CINIT(URL, OBJECTPOINT, 2),
CINIT(ERRORBUFFER, OBJECTPOINT, 10),
CINIT(WRITEFUNCTION, FUNCTIONPOINT, 11),
+ CINIT(POSTFIELDS, OBJECTPOINT, 15),
CINIT(REFERER, OBJECTPOINT, 16),
CINIT(USERAGENT, OBJECTPOINT, 18),
+ CINIT(LOW_SPEED_LIMIT, LONG , 19),
+ CINIT(LOW_SPEED_TIME, LONG, 20),
CINIT(RESUME_FROM, LONG, 21),
+ CINIT(HTTPHEADER, OBJECTPOINT, 23),
+ CINIT(POST, LONG, 47), /* HTTP POST method */
CINIT(FOLLOWLOCATION, LONG, 52), /* use Location: Luke! */
+ CINIT(POSTFIELDSIZE, LONG, 60),
CINIT(PRIVATE, OBJECTPOINT, 103),
+ CINIT(PROTOCOLS, LONG, 181),
+ CINIT(REDIR_PROTOCOLS, LONG, 182)
}
CURLoption;
+#define CURLPROTO_HTTP (1<<0)
+#define CURLPROTO_HTTPS (1<<1)
+#define CURLPROTO_FTP (1<<2)
typedef enum
{
CURLINFO_TEXT = 0,
CURLINFO_PROXYAUTH_AVAIL = CURLINFO_LONG + 24,
CURLINFO_OS_ERRNO = CURLINFO_LONG + 25,
CURLINFO_NUM_CONNECTS = CURLINFO_LONG + 26,
- CURLINFO_SSL_ENGINES = CURLINFO_SLIST + 27,
+ CURLINFO_SSL_ENGINES = CURLINFO_SLIST + 27
}
CURLINFO;
CURLMsg;
static void (*qcurl_global_init) (long flags);
-static void (*qcurl_global_cleanup) ();
+static void (*qcurl_global_cleanup) (void);
-static CURL * (*qcurl_easy_init) ();
+static CURL * (*qcurl_easy_init) (void);
static void (*qcurl_easy_cleanup) (CURL *handle);
static CURLcode (*qcurl_easy_setopt) (CURL *handle, CURLoption option, ...);
static CURLcode (*qcurl_easy_getinfo) (CURL *handle, CURLINFO info, ...);
static const char * (*qcurl_easy_strerror) (CURLcode);
-static CURLM * (*qcurl_multi_init) ();
+static CURLM * (*qcurl_multi_init) (void);
static CURLMcode (*qcurl_multi_perform) (CURLM *multi_handle, int *running_handles);
static CURLMcode (*qcurl_multi_add_handle) (CURLM *multi_handle, CURL *easy_handle);
static CURLMcode (*qcurl_multi_remove_handle) (CURLM *multi_handle, CURL *easy_handle);
static CURLMsg * (*qcurl_multi_info_read) (CURLM *multi_handle, int *msgs_in_queue);
static void (*qcurl_multi_cleanup) (CURLM *);
static const char * (*qcurl_multi_strerror) (CURLcode);
+static curl_slist * (*qcurl_slist_append) (curl_slist *list, const char *string);
+static void (*qcurl_slist_free_all) (curl_slist *list);
static dllfunction_t curlfuncs[] =
{
{"curl_multi_info_read", (void **) &qcurl_multi_info_read},
{"curl_multi_cleanup", (void **) &qcurl_multi_cleanup},
{"curl_multi_strerror", (void **) &qcurl_multi_strerror},
+ {"curl_slist_append", (void **) &qcurl_slist_append},
+ {"curl_slist_free_all", (void **) &qcurl_slist_free_all},
{NULL, NULL}
};
static dllhandle_t curl_dll = NULL;
// will be checked at many places to find out if qcurl calls are allowed
+#define LOADTYPE_NONE 0
+#define LOADTYPE_PAK 1
+#define LOADTYPE_CACHEPIC 2
+#define LOADTYPE_SKINFRAME 3
+
+void *curl_mutex = NULL;
+
+// Per-download state, kept in the doubly-linked "downloads" list.
typedef struct downloadinfo_s
{
-	char filename[MAX_QPATH];
-	char url[256];
+	char filename[MAX_OSPATH];
+	char url[1024];
	char referer[256];
	qfile_t *stream;
	fs_offset_t startpos;
	CURL *curle;
	qboolean started;
-	qboolean ispak;
-	unsigned long bytes_received;
+	int loadtype;	// LOADTYPE_* action performed once the download finished
+	size_t bytes_received; // for buffer
+	double bytes_received_curl; // for throttling
+	double bytes_sent_curl; // for throttling
	struct downloadinfo_s *next, *prev;
	qboolean forthismap;
+	double maxspeed;	// per-download speed cap -- presumably KiB/s like cl_curl_maxspeed; TODO confirm
+	curl_slist *slist; // http headers
+
+	unsigned char *buffer;	// when non-NULL, download target is this memory buffer
+	size_t buffersize;
+	curl_callback_t callback;	// completion callback (curl_default_callback if none given)
+	void *callback_data;
+
+	const unsigned char *postbuf;	// HTTP POST body; stored as pointer, not copied
+	size_t postbufsize;
+	const char *post_content_type;	// non-NULL switches the request to POST
+	const char *extraheaders;	// "\n"-separated extra headers; stored as pointer, not copied
}
downloadinfo;
static downloadinfo *downloads = NULL;
static int numdownloads = 0;
+static qboolean noclear = FALSE;
+
+static int numdownloads_fail = 0;
+static int numdownloads_success = 0;
+static int numdownloads_added = 0;
+static char command_when_done[256] = "";
+static char command_when_error[256] = "";
+
+/*
+====================
+Curl_CommandWhenDone
+
+Sets the command which is to be executed when the last download completes AND
+all downloads since last server connect ended with a successful status.
+Setting the command to NULL clears it.
+====================
+*/
+static void Curl_CommandWhenDone(const char *cmd)
+{
+	// nothing to schedule if the cURL library was never loaded
+	if(!curl_dll)
+		return;
+	if(cmd)
+		strlcpy(command_when_done, cmd, sizeof(command_when_done));
+	else
+		*command_when_done = 0;
+}
+
+/*
+FIXME
+Do not use yet. Not complete.
+Problem: what counts as an error?
+*/
+
+// Sets the command to execute when the downloads finish with at least one
+// failure; NULL clears it. Fired from Curl_CheckCommandWhenDone.
+static void Curl_CommandWhenError(const char *cmd)
+{
+	if(!curl_dll)
+		return;
+	if(cmd)
+		strlcpy(command_when_error, cmd, sizeof(command_when_error));
+	else
+		*command_when_error = 0;
+}
+
+/*
+====================
+Curl_Clear_forthismap
+
+Clears the "will disconnect on failure" flags.
+====================
+*/
+void Curl_Clear_forthismap(void)
+{
+	downloadinfo *di;
+	// NOTE(review): noclear is reset at the top of Curl_Run; it appears to
+	// suppress clearing while a download batch is still being set up -- confirm
+	if(noclear)
+		return;
+	if (curl_mutex) Thread_LockMutex(curl_mutex);
+	for(di = downloads; di; di = di->next)
+		di->forthismap = false;
+	// also forget pending done/error commands and reset the batch counters
+	Curl_CommandWhenError(NULL);
+	Curl_CommandWhenDone(NULL);
+	numdownloads_fail = 0;
+	numdownloads_success = 0;
+	numdownloads_added = 0;
+	if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+}
+
+/*
+====================
+Curl_Have_forthismap
+
+Returns true if a download needed for the current game is running.
+====================
+*/
+qboolean Curl_Have_forthismap(void)
+{
+	// counter-based: nonzero until Curl_Clear_forthismap resets the batch
+	return numdownloads_added != 0;
+}
+
+// Arrange for "cl_begindownloads" to be executed once the queued downloads
+// end, on both the success and the failure path.
+void Curl_Register_predownload(void)
+{
+	if (curl_mutex) Thread_LockMutex(curl_mutex);
+	Curl_CommandWhenDone("cl_begindownloads");
+	Curl_CommandWhenError("cl_begindownloads");
+	if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+}
+
+/*
+====================
+Curl_CheckCommandWhenDone
+
+Checks if a "done command" is to be executed.
+All downloads finished, at least one success since connect, no single failure
+-> execute the command.
+*/
+static void Curl_CheckCommandWhenDone(void)
+{
+	if(!curl_dll)
+		return;
+	// fire only once every queued download has reported a final status
+	if(numdownloads_added && ((numdownloads_success + numdownloads_fail) == numdownloads_added))
+	{
+		if(numdownloads_fail == 0)
+		{
+			Con_DPrintf("cURL downloads occurred, executing %s\n", command_when_done);
+			Cbuf_AddText("\n");
+			Cbuf_AddText(command_when_done);
+			Cbuf_AddText("\n");
+		}
+		else
+		{
+			Con_DPrintf("cURL downloads FAILED, executing %s\n", command_when_error);
+			Cbuf_AddText("\n");
+			Cbuf_AddText(command_when_error);
+			Cbuf_AddText("\n");
+		}
+		// resets counters and commands so this runs exactly once per batch
+		Curl_Clear_forthismap();
+	}
+}
+
/*
====================
CURL_CloseLibrary
{
const char* dllnames [] =
{
-#if defined(WIN64)
- "libcurl64.dll",
-#elif defined(WIN32)
+#if defined(WIN32)
+ "libcurl-4.dll",
"libcurl-3.dll",
#elif defined(MACOSX)
+ "libcurl.4.dylib", // Mac OS X Notyetreleased
"libcurl.3.dylib", // Mac OS X Tiger
"libcurl.2.dylib", // Mac OS X Panther
#else
+ "libcurl.so.4",
"libcurl.so.3",
+ "libcurl.so", // FreeBSD
#endif
NULL
};
return true;
// Load the DLL
- if (! Sys_LoadLibrary (dllnames, &curl_dll, curlfuncs))
- {
- Con_Printf ("cURL support disabled\n");
- return false;
- }
-
- Con_Printf ("cURL support enabled\n");
- return true;
+ return Sys_LoadLibrary (dllnames, &curl_dll, curlfuncs);
}
static CURLM *curlm = NULL;
-static unsigned long bytes_received = 0; // used for bandwidth throttling
+static double bytes_received = 0; // used for bandwidth throttling
+static double bytes_sent = 0; // used for bandwidth throttling
static double curltime = 0;
/*
*/
+// cURL write callback: stores received bytes into di->buffer (memory
+// download) and/or di->stream (file download). Returning fewer bytes than
+// received makes libcurl abort the transfer -- which is exactly what the
+// ret = -1 buffer-overrun path relies on.
static size_t CURL_fwrite(void *data, size_t size, size_t nmemb, void *vdi)
{
-	fs_offset_t ret;
+	fs_offset_t ret = -1;
	size_t bytes = size * nmemb;
	downloadinfo *di = (downloadinfo *) vdi;
-	bytes_received += bytes;
-	di->bytes_received += bytes;
+	if(di->buffer)
+	{
+		if(di->bytes_received + bytes <= di->buffersize)
+		{
+			memcpy(di->buffer + di->bytes_received, data, bytes);
+			ret = bytes;
+		}
+		// otherwise: buffer overrun, ret stays -1
+	}
+
+	if(di->stream)
+	{
+		ret = FS_Write(di->stream, data, bytes);
+	}
-	ret = FS_Write(di->stream, data, bytes);
+	di->bytes_received += bytes;
-	return ret; // why not ret / nmemb?
+	return ret;
+	// Why not ret / nmemb?
+	// Because CURLOPT_WRITEFUNCTION docs say to return the number of bytes.
+	// Yes, this is incompatible to fwrite(2).
}
typedef enum
}
CurlStatus;
-/*
-====================
-Curl_Clear_forthismap
+// Default completion handler: reports the final download status on the
+// developer console. Installed by Curl_Begin when no explicit callback is
+// given.
+static void curl_default_callback(int status, size_t length_received, unsigned char *buffer, void *cbdata)
+{
+	downloadinfo *di = (downloadinfo *) cbdata;
+	switch(status)
+	{
+		case CURLCBSTATUS_OK:
+			Con_DPrintf("Download of %s: OK\n", di->filename);
+			break;
+		case CURLCBSTATUS_FAILED:
+			Con_DPrintf("Download of %s: FAILED\n", di->filename);
+			break;
+		case CURLCBSTATUS_ABORTED:
+			Con_DPrintf("Download of %s: ABORTED\n", di->filename);
+			break;
+		case CURLCBSTATUS_SERVERERROR:
+			Con_DPrintf("Download of %s: (unknown server error)\n", di->filename);
+			break;
+		case CURLCBSTATUS_UNKNOWN:
+			Con_DPrintf("Download of %s: (unknown client error)\n", di->filename);
+			break;
+		default:
+			// positive values are libcurl/HTTP status codes passed through by Curl_EndDownload
+			Con_DPrintf("Download of %s: %d\n", di->filename, status);
+			break;
+	}
+}
-Clears the "will disconnect on failure" flags.
-====================
-*/
-void Curl_Clear_forthismap()
+// "Quiet" handler substituted by Curl_Cancel_ToMemory so a caller-supplied
+// callback is never invoked after cancellation; delegates to the default
+// handler, which only prints to the developer console.
+static void curl_quiet_callback(int status, size_t length_received, unsigned char *buffer, void *cbdata)
{
-	downloadinfo *di;
-	for(di = downloads; di; di = di->next)
-		di->forthismap = false;
+	curl_default_callback(status, length_received, buffer, cbdata);
}
-static qboolean Curl_Have_forthismap()
+// Decode a just-downloaded image file into a BGRA pixel buffer. Prefers the
+// server-provided Content-Type; otherwise sniffs the file's magic bytes.
+// Returns NULL on failure; the caller owns the returned pixel buffer.
+static unsigned char *decode_image(downloadinfo *di, const char *content_type)
{
-	downloadinfo *di;
-	for(di = downloads; di; di = di->next)
-		if(di->forthismap)
-			return true;
-	return false;
+	unsigned char *pixels = NULL;
+	fs_offset_t filesize = 0;
+	unsigned char *data = FS_LoadFile(di->filename, tempmempool, true, &filesize);
+	if(data)
+	{
+		int mip = 0;
+		if(!strcmp(content_type, "image/jpeg"))
+			pixels = JPEG_LoadImage_BGRA(data, filesize, &mip);
+		else if(!strcmp(content_type, "image/png"))
+			pixels = PNG_LoadImage_BGRA(data, filesize, &mip);
+		// BUG FIX: the JPEG magic is only 2 bytes (FF D8); comparing 7 bytes
+		// against the 2-char literal hits its NUL terminator at index 2 and
+		// never matches real JPEG data, so sniffing always failed.
+		else if(filesize >= 2 && !strncmp((char *) data, "\xFF\xD8", 2))
+			pixels = JPEG_LoadImage_BGRA(data, filesize, &mip);
+		// BUG FIX: the PNG signature is 8 bytes; check all 8, not 7.
+		else if(filesize >= 8 && !strncmp((char *) data, "\x89PNG\x0D\x0A\x1A\x0A", 8))
+			pixels = PNG_LoadImage_BGRA(data, filesize, &mip);
+		else
+			Con_Printf("Did not detect content type: %s\n", content_type);
+		Mem_Free(data);
+	}
+	// do we call Image_MakeLinearColorsFromsRGB or not?
+	return pixels;
}
/*
code from libcurl, or 0, if another error has occurred.
====================
*/
-static void Curl_EndDownload(downloadinfo *di, CurlStatus status, CURLcode error)
+static qboolean Curl_Begin(const char *URL, const char *extraheaders, double maxspeed, const char *name, int loadtype, qboolean forthismap, const char *post_content_type, const unsigned char *postbuf, size_t postbufsize, unsigned char *buf, size_t bufsize, curl_callback_t callback, void *cbdata);
+static void Curl_EndDownload(downloadinfo *di, CurlStatus status, CURLcode error, const char *content_type_)
{
+ char content_type[64];
qboolean ok = false;
if(!curl_dll)
return;
switch(status)
{
case CURL_DOWNLOAD_SUCCESS:
- Con_Printf("Download of %s: OK\n", di->filename);
ok = true;
+ di->callback(CURLCBSTATUS_OK, di->bytes_received, di->buffer, di->callback_data);
break;
case CURL_DOWNLOAD_FAILED:
- Con_Printf("Download of %s: FAILED\n", di->filename);
- if(error)
- Con_Printf("Reason given by libcurl: %s\n", qcurl_easy_strerror(error));
+ di->callback(CURLCBSTATUS_FAILED, di->bytes_received, di->buffer, di->callback_data);
break;
case CURL_DOWNLOAD_ABORTED:
- Con_Printf("Download of %s: ABORTED\n", di->filename);
+ di->callback(CURLCBSTATUS_ABORTED, di->bytes_received, di->buffer, di->callback_data);
break;
case CURL_DOWNLOAD_SERVERERROR:
- Con_Printf("Download of %s: %d\n", di->filename, (int) error);
-
// reopen to enforce it to have zero bytes again
- FS_Close(di->stream);
- di->stream = FS_Open(di->filename, "w", false, false);
+ if(di->stream)
+ {
+ FS_Close(di->stream);
+ di->stream = FS_OpenRealFile(di->filename, "wb", false);
+ }
+ if(di->callback)
+ di->callback(error ? (int) error : CURLCBSTATUS_SERVERERROR, di->bytes_received, di->buffer, di->callback_data);
+ break;
+ default:
+ if(di->callback)
+ di->callback(CURLCBSTATUS_UNKNOWN, di->bytes_received, di->buffer, di->callback_data);
break;
}
+ if(content_type_)
+ strlcpy(content_type, content_type_, sizeof(content_type));
+ else
+ *content_type = 0;
if(di->curle)
{
qcurl_multi_remove_handle(curlm, di->curle);
qcurl_easy_cleanup(di->curle);
+ if(di->slist)
+ qcurl_slist_free_all(di->slist);
}
- if(ok && !di->bytes_received)
+ if(!di->callback && ok && !di->bytes_received)
{
Con_Printf("ERROR: empty file\n");
ok = false;
if(di->stream)
FS_Close(di->stream);
- if(ok && di->ispak)
+#define CLEAR_AND_RETRY() \
+ do \
+ { \
+ di->stream = FS_OpenRealFile(di->filename, "wb", false); \
+ FS_Close(di->stream); \
+ if(di->startpos && !di->callback) \
+ { \
+ Curl_Begin(di->url, di->extraheaders, di->maxspeed, di->filename, di->loadtype, di->forthismap, di->post_content_type, di->postbuf, di->postbufsize, NULL, 0, NULL, NULL); \
+ di->forthismap = false; \
+ } \
+ } \
+ while(0)
+
+ if(ok && di->loadtype == LOADTYPE_PAK)
{
ok = FS_AddPack(di->filename, NULL, true);
- if(ok && di->forthismap)
- Mod_Reload();
+ if(!ok)
+ CLEAR_AND_RETRY();
}
+ else if(ok && di->loadtype == LOADTYPE_CACHEPIC)
+ {
+ const char *p;
+ unsigned char *pixels = NULL;
- if(!ok && di->forthismap)
+ p = di->filename;
+#ifdef WE_ARE_EVIL
+ if(!strncmp(p, "dlcache/", 8))
+ p += 8;
+#endif
+
+ pixels = decode_image(di, content_type);
+ if(pixels)
+ Draw_NewPic(p, image_width, image_height, true, pixels);
+ else
+ CLEAR_AND_RETRY();
+ }
+ else if(ok && di->loadtype == LOADTYPE_SKINFRAME)
{
- // BAD. Something went totally wrong.
- // The best we can do is clean up the forthismap flags...
- Curl_Clear_forthismap();
- // and disconnect.
- CL_Disconnect_f();
+ const char *p;
+ unsigned char *pixels = NULL;
+
+ p = di->filename;
+#ifdef WE_ARE_EVIL
+ if(!strncmp(p, "dlcache/", 8))
+ p += 8;
+#endif
+
+ pixels = decode_image(di, content_type);
+ if(pixels)
+ R_SkinFrame_LoadInternalBGRA(p, TEXF_FORCE_RELOAD | TEXF_MIPMAP | TEXF_ALPHA, pixels, image_width, image_height, false); // TODO what sRGB argument to put here?
+ else
+ CLEAR_AND_RETRY();
}
if(di->prev)
downloads = di->next;
if(di->next)
di->next->prev = di->prev;
- Z_Free(di);
--numdownloads;
+ if(di->forthismap)
+ {
+ if(ok)
+ ++numdownloads_success;
+ else
+ ++numdownloads_fail;
+ }
+ Z_Free(di);
+}
+
+/*
+====================
+CleanURL
+
+Returns a "cleaned up" URL for display (to strip login data)
+====================
+*/
+static const char *CleanURL(const char *url, char *urlbuf, size_t urlbuflength)
+{
+	const char *p, *q, *r;
+
+	// if URL is of form anything://foo-without-slash@rest, replace by anything://rest
+	p = strstr(url, "://");
+	if(p)
+	{
+		q = strchr(p + 3, '@');
+		if(q)
+		{
+			r = strchr(p + 3, '/');
+			// strip the "user:pass@" part only when the '@' occurs before any
+			// path slash, i.e. when it really lies in the host portion
+			if(!r || q < r)
+			{
+				dpsnprintf(urlbuf, urlbuflength, "%.*s%s", (int)(p - url + 3), url, q + 1);
+				return urlbuf;
+			}
+		}
+	}
+
+	// nothing to strip: return the caller's URL unchanged (urlbuf unused)
+	return url;
+}
/*
up to a maximum number of cl_curl_maxdownloads are running.
====================
*/
-static void CheckPendingDownloads()
+static void CheckPendingDownloads(void)
{
+ const char *h;
+ char urlbuf[1024];
+ char vabuf[1024];
if(!curl_dll)
return;
if(numdownloads < cl_curl_maxdownloads.integer)
{
if(!di->started)
{
- Con_Printf("Downloading %s -> %s", di->url, di->filename);
+ if(!di->buffer)
+ {
+ Con_Printf("Downloading %s -> %s", CleanURL(di->url, urlbuf, sizeof(urlbuf)), di->filename);
- di->stream = FS_Open(di->filename, "ab", false, false);
- if(!di->stream)
+ di->stream = FS_OpenRealFile(di->filename, "ab", false);
+ if(!di->stream)
+ {
+ Con_Printf("\nFAILED: Could not open output file %s\n", di->filename);
+ Curl_EndDownload(di, CURL_DOWNLOAD_FAILED, CURLE_OK, NULL);
+ return;
+ }
+ FS_Seek(di->stream, 0, SEEK_END);
+ di->startpos = FS_Tell(di->stream);
+
+ if(di->startpos > 0)
+ Con_Printf(", resuming from position %ld", (long) di->startpos);
+ Con_Print("...\n");
+ }
+ else
{
- Con_Printf("\nFAILED: Could not open output file %s\n", di->filename);
- Curl_EndDownload(di, CURL_DOWNLOAD_FAILED, CURLE_OK);
- return;
+ Con_DPrintf("Downloading %s -> memory\n", CleanURL(di->url, urlbuf, sizeof(urlbuf)));
+ di->startpos = 0;
}
- FS_Seek(di->stream, 0, SEEK_END);
- di->startpos = FS_Tell(di->stream);
- if(di->startpos > 0)
- Con_Printf(", resuming from position %ld", (long) di->startpos);
- Con_Print("...\n");
-
di->curle = qcurl_easy_init();
+ di->slist = NULL;
qcurl_easy_setopt(di->curle, CURLOPT_URL, di->url);
- qcurl_easy_setopt(di->curle, CURLOPT_USERAGENT, engineversion);
+ if(cl_curl_useragent.integer)
+ {
+ const char *ua
+#ifdef HTTP_USER_AGENT
+ = HTTP_USER_AGENT;
+#else
+ = engineversion;
+#endif
+ if(!ua)
+ ua = "";
+ if(*cl_curl_useragent_append.string)
+ ua = va(vabuf, sizeof(vabuf), "%s%s%s",
+ ua,
+ (ua[0] && ua[strlen(ua)-1] != ' ')
+ ? " "
+ : "",
+ cl_curl_useragent_append.string);
+ qcurl_easy_setopt(di->curle, CURLOPT_USERAGENT, ua);
+ }
+ else
+ qcurl_easy_setopt(di->curle, CURLOPT_USERAGENT, "");
qcurl_easy_setopt(di->curle, CURLOPT_REFERER, di->referer);
qcurl_easy_setopt(di->curle, CURLOPT_RESUME_FROM, (long) di->startpos);
qcurl_easy_setopt(di->curle, CURLOPT_FOLLOWLOCATION, 1);
qcurl_easy_setopt(di->curle, CURLOPT_WRITEFUNCTION, CURL_fwrite);
+ qcurl_easy_setopt(di->curle, CURLOPT_LOW_SPEED_LIMIT, (long) 256);
+ qcurl_easy_setopt(di->curle, CURLOPT_LOW_SPEED_TIME, (long) 45);
qcurl_easy_setopt(di->curle, CURLOPT_WRITEDATA, (void *) di);
qcurl_easy_setopt(di->curle, CURLOPT_PRIVATE, (void *) di);
+ qcurl_easy_setopt(di->curle, CURLOPT_PROTOCOLS, CURLPROTO_HTTP | CURLPROTO_HTTPS | CURLPROTO_FTP);
+ if(qcurl_easy_setopt(di->curle, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP | CURLPROTO_HTTPS | CURLPROTO_FTP) != CURLE_OK)
+ {
+ Con_Printf("^1WARNING:^7 for security reasons, please upgrade to libcurl 7.19.4 or above. In a later version of DarkPlaces, HTTP redirect support will be disabled for this libcurl version.\n");
+ //qcurl_easy_setopt(di->curle, CURLOPT_FOLLOWLOCATION, 0);
+ }
+ if(di->post_content_type)
+ {
+ qcurl_easy_setopt(di->curle, CURLOPT_POST, 1);
+ qcurl_easy_setopt(di->curle, CURLOPT_POSTFIELDS, di->postbuf);
+ qcurl_easy_setopt(di->curle, CURLOPT_POSTFIELDSIZE, di->postbufsize);
+ di->slist = qcurl_slist_append(di->slist, va(vabuf, sizeof(vabuf), "Content-Type: %s", di->post_content_type));
+ }
+
+ // parse extra headers into slist
+ // \n separated list!
+ h = di->extraheaders;
+ while(h)
+ {
+ const char *hh = strchr(h, '\n');
+ if(hh)
+ {
+ char *buf = (char *) Mem_Alloc(tempmempool, hh - h + 1);
+ memcpy(buf, h, hh - h);
+ buf[hh - h] = 0;
+ di->slist = qcurl_slist_append(di->slist, buf);
+ h = hh + 1;
+ }
+ else
+ {
+ di->slist = qcurl_slist_append(di->slist, h);
+ h = NULL;
+ }
+ }
+
+ qcurl_easy_setopt(di->curle, CURLOPT_HTTPHEADER, di->slist);
+
qcurl_multi_add_handle(curlm, di->curle);
di->started = true;
++numdownloads;
On Win32, this must be called AFTER WSAStartup has been done!
====================
*/
-void Curl_Init()
+void Curl_Init(void)
{
	CURL_OpenLibrary();
	if(!curl_dll)
		return;
+	// the mutex guards the downloads list when the engine runs threaded
+	if (Thread_HasThreads()) curl_mutex = Thread_CreateMutex();
	qcurl_global_init(CURL_GLOBAL_NOTHING);
	curlm = qcurl_multi_init();
}
Surprise... closes all the stuff. Please do this BEFORE shutting down LHNET.
====================
*/
-void Curl_ClearRequirements();
-void Curl_Shutdown()
+void Curl_ClearRequirements(void);
+// Tears down all cURL state; cancels running downloads first.
+void Curl_Shutdown(void)
{
	if(!curl_dll)
		return;
	Curl_ClearRequirements();
	Curl_CancelAll();
+	// BUG FIX: also clear the handle, otherwise later "if (curl_mutex)"
+	// guards elsewhere in this file would lock a destroyed mutex
+	if (curl_mutex)
+	{
+		Thread_DestroyMutex(curl_mutex);
+		curl_mutex = NULL;
+	}
	CURL_CloseLibrary();
	curl_dll = NULL;
}
return NULL;
}
+// Abort every download registered with this exact callback/cbdata pair,
+// WITHOUT invoking that callback (the caller's receiver may already be gone).
+void Curl_Cancel_ToMemory(curl_callback_t callback, void *cbdata)
+{
+	downloadinfo *di;
+	if(!curl_dll)
+		return;
+	for(di = downloads; di; )
+	{
+		if(di->callback == callback && di->callback_data == cbdata)
+		{
+			di->callback = curl_quiet_callback; // do NOT call the callback
+			Curl_EndDownload(di, CURL_DOWNLOAD_ABORTED, CURLE_OK, NULL);
+			// Curl_EndDownload unlinked and freed di: restart from the head
+			di = downloads;
+		}
+		else
+			di = di->next;
+	}
+}
+
/*
====================
Curl_Begin
if given) in the "dlcache/" folder.
====================
*/
-void Curl_Begin(const char *URL, const char *name, qboolean ispak, qboolean forthismap)
+static qboolean Curl_Begin(const char *URL, const char *extraheaders, double maxspeed, const char *name, int loadtype, qboolean forthismap, const char *post_content_type, const unsigned char *postbuf, size_t postbufsize, unsigned char *buf, size_t bufsize, curl_callback_t callback, void *cbdata)
{
- if(!curl_dll)
- return;
+ if(buf)
+ if(loadtype != LOADTYPE_NONE)
+ Host_Error("Curl_Begin: loadtype and buffer are both set");
+
+ if(!curl_dll || !cl_curl_enabled.integer)
+ {
+ return false;
+ }
else
{
- char fn[MAX_QPATH];
+ char fn[MAX_OSPATH];
+ char urlbuf[1024];
const char *p, *q;
size_t length;
downloadinfo *di;
+ // if URL is protocol:///* or protocol://:port/*, insert the IP of the current server
+ p = strchr(URL, ':');
+ if(p)
+ {
+ if(!strncmp(p, ":///", 4) || !strncmp(p, "://:", 4))
+ {
+ char addressstring[128];
+ *addressstring = 0;
+ InfoString_GetValue(cls.userinfo, "*ip", addressstring, sizeof(addressstring));
+ q = strchr(addressstring, ':');
+ if(!q)
+ q = addressstring + strlen(addressstring);
+ if(*addressstring)
+ {
+ dpsnprintf(urlbuf, sizeof(urlbuf), "%.*s://%.*s%s", (int) (p - URL), URL, (int) (q - addressstring), addressstring, URL + (p - URL) + 3);
+ URL = urlbuf;
+ }
+ }
+ }
+
// Note: This extraction of the file name portion is NOT entirely correct.
//
// It does the following:
//
// 141.2.16.3 - - [17/Mar/2006:22:32:43 +0100] "GET /maps/tznex07.pk3 HTTP/1.1" 200 1077455 "dp://141.2.16.7:26000/" "Nexuiz Linux 22:07:43 Mar 17 2006"
- if(!name)
- name = URL;
- p = strrchr(name, '/');
- p = p ? (p+1) : name;
- q = strchr(p, '?');
- length = q ? (size_t)(q - p) : strlen(p);
- dpsnprintf(fn, sizeof(fn), "dlcache/%.*s", (int)length, p);
-
+ if (curl_mutex) Thread_LockMutex(curl_mutex);
- // already downloading the file?
+ if(buf)
{
- downloadinfo *di = Curl_Find(fn);
- if(di)
- {
- Con_Printf("Can't download %s, already getting it from %s!\n", fn, di->url);
-
- // however, if it was not for this map yet...
- if(forthismap)
- di->forthismap = true;
-
- return;
- }
+ if(!name)
+ name = CleanURL(URL, urlbuf, sizeof(urlbuf));
}
-
- if(ispak && FS_FileExists(fn))
+ else
{
- qboolean already_loaded;
- if(FS_AddPack(fn, &already_loaded, true))
+ if(!name)
{
- Con_DPrintf("%s already exists, not downloading!\n", fn);
- if(already_loaded)
- Con_DPrintf("(pak was already loaded)\n");
- else
- if(forthismap)
- Mod_Reload();
- return;
+ name = CleanURL(URL, urlbuf, sizeof(urlbuf));
+ p = strrchr(name, '/');
+ p = p ? (p+1) : name;
+ q = strchr(p, '?');
+ length = q ? (size_t)(q - p) : strlen(p);
+ dpsnprintf(fn, sizeof(fn), "dlcache/%.*s", (int)length, p);
}
else
{
- qfile_t *f = FS_Open(fn, "rb", false, false);
- if(f)
+ dpsnprintf(fn, sizeof(fn), "dlcache/%s", name);
+ }
+
+ name = fn; // make it point back
+
+ // already downloading the file?
+ {
+ downloadinfo *di = Curl_Find(fn);
+ if(di)
{
- char buf[4] = {0};
- FS_Read(f, buf, sizeof(buf)); // no "-1", I will use memcmp
+ Con_Printf("Can't download %s, already getting it from %s!\n", fn, CleanURL(di->url, urlbuf, sizeof(urlbuf)));
- if(memcmp(buf, "PK\x03\x04", 4) && memcmp(buf, "PACK", 4))
+ // however, if it was not for this map yet...
+ if(forthismap && !di->forthismap)
{
- Con_DPrintf("Detected non-PAK %s, clearing and NOT resuming.\n", fn);
- FS_Close(f);
- f = FS_Open(fn, "w", false, false);
- if(f)
- FS_Close(f);
+ di->forthismap = true;
+ // this "fakes" a download attempt so the client will wait for
+ // the download to finish and then reconnect
+ ++numdownloads_added;
+ }
+
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+ return false;
+ }
+ }
+
+ if(FS_FileExists(fn))
+ {
+ if(loadtype == LOADTYPE_PAK)
+ {
+ qboolean already_loaded;
+ if(FS_AddPack(fn, &already_loaded, true))
+ {
+ Con_DPrintf("%s already exists, not downloading!\n", fn);
+ if(already_loaded)
+ Con_DPrintf("(pak was already loaded)\n");
+ else
+ {
+ if(forthismap)
+ {
+ ++numdownloads_added;
+ ++numdownloads_success;
+ }
+ }
+
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+ return false;
}
else
{
- // OK
- FS_Close(f);
+ qfile_t *f = FS_OpenRealFile(fn, "rb", false);
+ if(f)
+ {
+ char buf[4] = {0};
+ FS_Read(f, buf, sizeof(buf)); // no "-1", I will use memcmp
+
+ if(memcmp(buf, "PK\x03\x04", 4) && memcmp(buf, "PACK", 4))
+ {
+ Con_DPrintf("Detected non-PAK %s, clearing and NOT resuming.\n", fn);
+ FS_Close(f);
+ f = FS_OpenRealFile(fn, "wb", false);
+ if(f)
+ FS_Close(f);
+ }
+ else
+ {
+ // OK
+ FS_Close(f);
+ }
+ }
}
}
+ else
+ {
+ // never resume these
+ qfile_t *f = FS_OpenRealFile(fn, "wb", false);
+ if(f)
+ FS_Close(f);
+ }
}
}
+ // if we get here, we actually want to download... so first verify the
+ // URL scheme (so one can't read local files using file://)
+ if(strncmp(URL, "http://", 7) && strncmp(URL, "ftp://", 6) && strncmp(URL, "https://", 8))
+ {
+ Con_Printf("Curl_Begin(\"%s\"): nasty URL scheme rejected\n", URL);
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+ return false;
+ }
+
+ if(forthismap)
+ ++numdownloads_added;
di = (downloadinfo *) Z_Malloc(sizeof(*di));
- strlcpy(di->filename, fn, sizeof(di->filename));
+ strlcpy(di->filename, name, sizeof(di->filename));
strlcpy(di->url, URL, sizeof(di->url));
dpsnprintf(di->referer, sizeof(di->referer), "dp://%s/", cls.netcon ? cls.netcon->address : "notconnected.invalid");
di->forthismap = forthismap;
di->startpos = 0;
di->curle = NULL;
di->started = false;
- di->ispak = ispak;
+ di->loadtype = loadtype;
+ di->maxspeed = maxspeed;
di->bytes_received = 0;
+ di->bytes_received_curl = 0;
+ di->bytes_sent_curl = 0;
+ di->extraheaders = extraheaders;
di->next = downloads;
di->prev = NULL;
if(di->next)
di->next->prev = di;
+
+ di->buffer = buf;
+ di->buffersize = bufsize;
+ if(callback == NULL)
+ {
+ di->callback = curl_default_callback;
+ di->callback_data = di;
+ }
+ else
+ {
+ di->callback = callback;
+ di->callback_data = cbdata;
+ }
+
+ if(post_content_type)
+ {
+ di->post_content_type = post_content_type;
+ di->postbuf = postbuf;
+ di->postbufsize = postbufsize;
+ }
+ else
+ {
+ di->post_content_type = NULL;
+ di->postbuf = NULL;
+ di->postbufsize = 0;
+ }
+
downloads = di;
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
+ return true;
}
}
+// Queue a download of URL into a file (stored under dlcache/), then handle it
+// according to loadtype (LOADTYPE_*). Returns true if the download was queued.
+qboolean Curl_Begin_ToFile(const char *URL, double maxspeed, const char *name, int loadtype, qboolean forthismap)
+{
+	return Curl_Begin(URL, NULL, maxspeed, name, loadtype, forthismap, NULL, NULL, 0, NULL, 0, NULL, NULL);
+}
+// Queue a download of URL into the caller-provided memory buffer; callback is
+// invoked with the final status and received length.
+qboolean Curl_Begin_ToMemory(const char *URL, double maxspeed, unsigned char *buf, size_t bufsize, curl_callback_t callback, void *cbdata)
+{
+	// pass LOADTYPE_NONE (not the boolean "false") for the int loadtype parameter
+	return Curl_Begin(URL, NULL, maxspeed, NULL, LOADTYPE_NONE, false, NULL, NULL, 0, buf, bufsize, callback, cbdata);
+}
+// Like Curl_Begin_ToMemory, but performs an HTTP POST of postbuf with the
+// given Content-Type; extraheaders is a "\n"-separated header list or NULL.
+qboolean Curl_Begin_ToMemory_POST(const char *URL, const char *extraheaders, double maxspeed, const char *post_content_type, const unsigned char *postbuf, size_t postbufsize, unsigned char *buf, size_t bufsize, curl_callback_t callback, void *cbdata)
+{
+	return Curl_Begin(URL, extraheaders, maxspeed, NULL, LOADTYPE_NONE, false, post_content_type, postbuf, postbufsize, buf, bufsize, callback, cbdata);
+}
/*
====================
blocking.
====================
*/
-void Curl_Run()
+void Curl_Run(void)
{
+ double maxspeed;
+ downloadinfo *di;
+
+ noclear = FALSE;
+
if(!cl_curl_enabled.integer)
return;
if(!curl_dll)
return;
+ if (curl_mutex) Thread_LockMutex(curl_mutex);
+
+ Curl_CheckCommandWhenDone();
+
if(!downloads)
+ {
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
return;
+ }
if(realtime < curltime) // throttle
+ {
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
return;
+ }
{
int remaining;
CURLMcode mc;
-
+
do
{
mc = qcurl_multi_perform(curlm, &remaining);
}
while(mc == CURLM_CALL_MULTI_PERFORM);
+		// accumulate per-handle transfer deltas into the global throttle counters
+		for(di = downloads; di; di = di->next)
+		{
+			double b = 0;
+			if(di->curle)
+			{
+				qcurl_easy_getinfo(di->curle, CURLINFO_SIZE_UPLOAD, &b);
+				bytes_sent += (b - di->bytes_sent_curl);
+				di->bytes_sent_curl = b;
+				qcurl_easy_getinfo(di->curle, CURLINFO_SIZE_DOWNLOAD, &b);
+				// BUG FIX: downloaded bytes must be charged to bytes_received,
+				// not bytes_sent, or the bandwidth throttle double-counts uploads
+				// and never sees downloads
+				bytes_received += (b - di->bytes_received_curl);
+				di->bytes_received_curl = b;
+			}
+		}
+
for(;;)
{
CURLMsg *msg = qcurl_multi_info_read(curlm, &remaining);
break;
if(msg->msg == CURLMSG_DONE)
{
- downloadinfo *di;
+ const char *ct = NULL;
CurlStatus failed = CURL_DOWNLOAD_SUCCESS;
CURLcode result;
-
qcurl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &di);
result = msg->data.result;
if(result)
case 4: // e.g. 404?
case 5: // e.g. 500?
failed = CURL_DOWNLOAD_SERVERERROR;
- result = code;
+ result = (CURLcode) code;
break;
}
+ qcurl_easy_getinfo(msg->easy_handle, CURLINFO_CONTENT_TYPE, &ct);
}
-
- Curl_EndDownload(di, failed, result);
+
+ Curl_EndDownload(di, failed, result, ct);
}
}
}
// when will we curl the next time?
// we will wait a bit to ensure our download rate is kept.
// we now know that realtime >= curltime... so set up a new curltime
- if(cl_curl_maxspeed.value > 0)
+
+ // use the slowest allowing download to derive the maxspeed... this CAN
+ // be done better, but maybe later
+ maxspeed = cl_curl_maxspeed.value;
+ for(di = downloads; di; di = di->next)
+ if(di->maxspeed > 0)
+ if(di->maxspeed < maxspeed || maxspeed <= 0)
+ maxspeed = di->maxspeed;
+
+ if(maxspeed > 0)
{
- unsigned long bytes = bytes_received; // maybe smoothen a bit?
- curltime = realtime + bytes / (cl_curl_maxspeed.value * 1024.0);
- bytes_received -= bytes;
+ double bytes = bytes_sent + bytes_received; // maybe smoothen a bit?
+ curltime = realtime + bytes / (maxspeed * 1024.0);
+ bytes_sent = 0;
+ bytes_received = 0;
}
else
curltime = realtime;
+
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
}
/*
Stops ALL downloads.
====================
*/
+// Stops ALL downloads; each Curl_EndDownload call unlinks and frees the
+// current head of the list.
-void Curl_CancelAll()
+void Curl_CancelAll(void)
{
	if(!curl_dll)
		return;
+	if (curl_mutex) Thread_LockMutex(curl_mutex);
+
	while(downloads)
	{
-		Curl_EndDownload(downloads, CURL_DOWNLOAD_ABORTED, CURLE_OK);
+		Curl_EndDownload(downloads, CURL_DOWNLOAD_ABORTED, CURLE_OK, NULL);
		// INVARIANT: downloads will point to the next download after that!
	}
+
+	if (curl_mutex) Thread_UnlockMutex(curl_mutex);
}
/*
returns true iff there is a download running.
====================
*/
-qboolean Curl_Running()
+qboolean Curl_Running(void)
{
if(!curl_dll)
return false;
double length;
qcurl_easy_getinfo(di->curle, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &length);
if(length > 0)
- return di->bytes_received / length;
+ return (di->startpos + di->bytes_received) / (di->startpos + length);
else
return 0;
}
====================
*/
// TODO rewrite using Curl_GetDownloadInfo?
-static void Curl_Info_f()
+static void Curl_Info_f(void)
{
downloadinfo *di;
+ char urlbuf[1024];
if(!curl_dll)
return;
if(Curl_Running())
{
+ if (curl_mutex) Thread_LockMutex(curl_mutex);
Con_Print("Currently running downloads:\n");
for(di = downloads; di; di = di->next)
{
double speed, percent;
- Con_Printf(" %s -> %s ", di->url, di->filename);
+ Con_Printf(" %s -> %s ", CleanURL(di->url, urlbuf, sizeof(urlbuf)), di->filename);
percent = 100.0 * Curl_GetDownloadAmount(di);
speed = Curl_GetDownloadSpeed(di);
if(percent >= 0)
else
Con_Print("(queued)\n");
}
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
}
else
{
curl [--pak] [--forthismap] [--for filename filename...] url
--pak: after downloading, load the package into the virtual file system
 --for filename...: only download if at least one of the named files is missing
- --forthismap: disconnect on failure
+ --forthismap: don't reconnect on failure
+
+curl --clear_autodownload
+ clears the download success/failure counters
+
+curl --finish_autodownload
+ if at least one download has been started, disconnect and drop to the menu
+ once the last download completes successfully, reconnect to the current server
====================
*/
-void Curl_Curl_f(void)
+static void Curl_Curl_f(void)
{
+ double maxspeed = 0;
int i;
int end;
- qboolean pak = false;
+ int loadtype = LOADTYPE_NONE;
qboolean forthismap = false;
const char *url;
const char *name = 0;
return;
}
- for(i = 0; i != Cmd_Argc(); ++i)
- Con_DPrintf("%s ", Cmd_Argv(i));
- Con_DPrint("\n");
-
if(Cmd_Argc() < 2)
{
Con_Print("usage:\ncurl --info, curl --cancel [filename], curl url\n");
else
{
downloadinfo *di = Curl_Find(url);
- Curl_EndDownload(di, CURL_DOWNLOAD_ABORTED, CURLE_OK);
+ if(di)
+ Curl_EndDownload(di, CURL_DOWNLOAD_ABORTED, CURLE_OK, NULL);
+ else
+ Con_Print("download not found\n");
}
return;
}
else if(!strcmp(a, "--pak"))
{
- pak = true;
+ loadtype = LOADTYPE_PAK;
+ }
+ else if(!strcmp(a, "--cachepic"))
+ {
+ loadtype = LOADTYPE_CACHEPIC;
}
- else if(!strcmp(a, "--for"))
+ else if(!strcmp(a, "--skinframe"))
+ {
+ loadtype = LOADTYPE_SKINFRAME;
+ }
+ else if(!strcmp(a, "--for")) // must be last option
{
for(i = i + 1; i != end - 1; ++i)
{
}
else if(!strcmp(a, "--finish_autodownload"))
{
- // nothing
+ if(numdownloads_added)
+ {
+ char donecommand[256];
+ if(cls.netcon)
+ {
+ if(cl.loadbegun) // curling won't inhibit loading the map any more when at this stage, so bail out and force a reconnect
+ {
+ dpsnprintf(donecommand, sizeof(donecommand), "connect %s", cls.netcon->address);
+ Curl_CommandWhenDone(donecommand);
+ noclear = TRUE;
+ CL_Disconnect();
+ noclear = FALSE;
+ Curl_CheckCommandWhenDone();
+ }
+ else
+ Curl_Register_predownload();
+ }
+ }
return;
}
+ else if(!strncmp(a, "--maxspeed=", 11))
+ {
+ maxspeed = atof(a + 11);
+ }
else if(*a == '-')
{
- Con_Printf("invalid option %s\n", a);
- return;
+ Con_Printf("curl: invalid option %s\n", a);
+ // but we ignore the option
}
}
needthefile:
- Curl_Begin(url, name, pak, forthismap);
+ Curl_Begin_ToFile(url, maxspeed, name, loadtype, forthismap);
}
+/*
+static void curl_curlcat_callback(int code, size_t length_received, unsigned char *buffer, void *cbdata)
+{
+ Con_Printf("Received %d bytes (status %d):\n%.*s\n", (int) length_received, code, (int) length_received, buffer);
+ Z_Free(buffer);
+}
+
+void Curl_CurlCat_f(void)
+{
+ unsigned char *buf;
+ const char *url = Cmd_Argv(1);
+ buf = Z_Malloc(16384);
+ Curl_Begin_ToMemory(url, buf, 16384, curl_curlcat_callback, NULL);
+}
+*/
+
/*
====================
Curl_Init_Commands
Cvar_RegisterVariable (&cl_curl_maxdownloads);
Cvar_RegisterVariable (&cl_curl_maxspeed);
Cvar_RegisterVariable (&sv_curl_defaulturl);
+ Cvar_RegisterVariable (&sv_curl_serverpackages);
+ Cvar_RegisterVariable (&sv_curl_maxspeed);
+ Cvar_RegisterVariable (&cl_curl_useragent);
+ Cvar_RegisterVariable (&cl_curl_useragent_append);
Cmd_AddCommand ("curl", Curl_Curl_f, "download data from an URL and add to search path");
+ //Cmd_AddCommand ("curlcat", Curl_CurlCat_f, "display data from an URL (debugging command)");
}
/*
array must be freed later using Z_Free.
====================
*/
-Curl_downloadinfo_t *Curl_GetDownloadInfo(int *nDownloads, const char **additional_info)
+Curl_downloadinfo_t *Curl_GetDownloadInfo(int *nDownloads, const char **additional_info, char *addinfo, size_t addinfolength)
{
- int n, i;
+ int i;
downloadinfo *di;
Curl_downloadinfo_t *downinfo;
- static char addinfo[128];
if(!curl_dll)
{
return NULL;
}
- n = 0;
+ if (curl_mutex) Thread_LockMutex(curl_mutex);
+
+ i = 0;
for(di = downloads; di; di = di->next)
- ++n;
+ ++i;
- downinfo = (Curl_downloadinfo_t *) Z_Malloc(sizeof(*downinfo) * n);
+ downinfo = (Curl_downloadinfo_t *) Z_Malloc(sizeof(*downinfo) * i);
i = 0;
for(di = downloads; di; di = di->next)
{
+ // do not show infobars for background downloads
+ if(developer.integer <= 0)
+ if(di->buffer)
+ continue;
strlcpy(downinfo[i].filename, di->filename, sizeof(downinfo[i].filename));
if(di->curle)
{
}
++i;
}
-
+
if(additional_info)
{
- // TODO put something better here?
- // maybe... check if the file is actually needed for the current map?
- if(Curl_Have_forthismap())
+ // TODO: can I clear command_when_done as soon as the first download fails?
+ if(*command_when_done && !numdownloads_fail && numdownloads_added)
{
- dpsnprintf(addinfo, sizeof(addinfo), "please wait for the download to complete");
+ if(!strncmp(command_when_done, "connect ", 8))
+ dpsnprintf(addinfo, addinfolength, "(will join %s when done)", command_when_done + 8);
+ else if(!strcmp(command_when_done, "cl_begindownloads"))
+ dpsnprintf(addinfo, addinfolength, "(will enter the game when done)");
+ else
+ dpsnprintf(addinfo, addinfolength, "(will do '%s' when done)", command_when_done);
*additional_info = addinfo;
}
else
*additional_info = NULL;
}
- *nDownloads = n;
+ *nDownloads = i;
+ if (curl_mutex) Thread_UnlockMutex(curl_mutex);
return downinfo;
}
*/
static const char *Curl_FindPackURL(const char *filename)
{
- static char foundurl[256];
+ static char foundurl[1024]; // invoked only by server
fs_offset_t filesize;
char *buf = (char *) FS_LoadFile("curl_urls.txt", tempmempool, true, &filesize);
if(buf && filesize)
char *p = buf;
char *pattern = NULL, *patternend = NULL, *url = NULL, *urlend = NULL;
qboolean eof = false;
-
+
pattern = p;
while(!eof)
{
typedef struct requirement_s
{
struct requirement_s *next;
- char filename[MAX_QPATH];
+ char filename[MAX_OSPATH];
}
requirement;
static requirement *requirements = NULL;
+/*
+====================
+Curl_RequireFile
+
+Adds the given file to the list of requirements. The list is walked by
+Curl_SendRequirements to tell connecting clients which packs to download,
+and is presumably emptied again by Curl_ClearRequirements (TODO confirm,
+its body is elsewhere).
+
+The filename is copied into the Z_Malloc'ed list node, so the caller keeps
+ownership of its buffer; names longer than MAX_OSPATH are truncated by
+strlcpy. New entries are prepended to the head of the list.
+====================
+*/
+void Curl_RequireFile(const char *filename)
+{
+	requirement *req = (requirement *) Z_Malloc(sizeof(*requirements));
+	req->next = requirements;
+	strlcpy(req->filename, filename, sizeof(req->filename));
+	requirements = req;
+}
+
/*
====================
Curl_ClearRequirements
This should be called at every map change.
====================
*/
-void Curl_ClearRequirements()
+void Curl_ClearRequirements(void)
{
while(requirements)
{
}
}
-/*
-====================
-Curl_RequireFile
-
-Adds the given file to the list of requirements.
-====================
-*/
-void Curl_RequireFile(const char *filename)
-{
- requirement *req = (requirement *) Z_Malloc(sizeof(*requirements));
- req->next = requirements;
- strlcpy(req->filename, filename, sizeof(req->filename));
- requirements = req;
-}
-
/*
====================
Curl_SendRequirements
Makes the current host_client download all files it needs.
This is done by sending him the following console commands:
- curl --start_autodownload
+ curl --clear_autodownload
curl --pak --for maps/pushmoddm1.bsp --forthismap http://where/this/darn/map/is/pushmoddm1.pk3
curl --finish_autodownload
====================
*/
-void Curl_SendRequirements()
+static qboolean Curl_SendRequirement(const char *filename, qboolean foundone, char *sendbuffer, size_t sendbuffer_len) // appends the download commands for one required file; returns true if anything was appended
{
-	// for each requirement, find the pack name
-	char sendbuffer[4096] = "";
-	requirement *req;
+	const char *p;
+	const char *thispack = FS_WhichPack(filename); // pack the file lives in, if any -- see FS_WhichPack
+	const char *packurl;
-	strlcat(sendbuffer, "curl --clear_autodownload\n", sizeof(sendbuffer));
+	if(!thispack || !*thispack)
+		return false; // file is not inside a pack, so there is nothing the client could download
-	for(req = requirements; req; req = req->next)
+	p = strrchr(thispack, '/');
+	if(p)
+		thispack = p + 1; // strip the directory part, keep the bare pack file name
+
+	packurl = Curl_FindPackURL(thispack);
+
+	if(packurl && *packurl && strcmp(packurl, "-")) // a URL of "-" (from curl_urls.txt) disables downloading of this pack
	{
-		const char *p;
-		const char *thispack = FS_WhichPack(req->filename);
-		const char *packurl;
+		if(!foundone) // emit the counter reset only once, before the first download command
+			strlcat(sendbuffer, "curl --clear_autodownload\n", sendbuffer_len);
+
+		strlcat(sendbuffer, "curl --pak --forthismap --as ", sendbuffer_len);
+		strlcat(sendbuffer, thispack, sendbuffer_len);
+		if(sv_curl_maxspeed.value > 0)
+			dpsnprintf(sendbuffer + strlen(sendbuffer), sendbuffer_len - strlen(sendbuffer), " --maxspeed=%.1f", sv_curl_maxspeed.value);
+		strlcat(sendbuffer, " --for ", sendbuffer_len);
+		strlcat(sendbuffer, filename, sendbuffer_len);
+		strlcat(sendbuffer, " ", sendbuffer_len);
+		strlcat(sendbuffer, packurl, sendbuffer_len);
+		strlcat(sendbuffer, thispack, sendbuffer_len);
+		strlcat(sendbuffer, "\n", sendbuffer_len);
-		if(!thispack)
-			continue;
+		return true;
+	}
-		p = strrchr(thispack, '/');
-		if(p)
-			thispack = p + 1;
+	return false;
+}
+void Curl_SendRequirements(void)
+{
+ // for each requirement, find the pack name
+ char sendbuffer[4096] = "";
+ requirement *req;
+ qboolean foundone = false;
+ const char *p;
- packurl = Curl_FindPackURL(thispack);
+ for(req = requirements; req; req = req->next)
+ foundone = Curl_SendRequirement(req->filename, foundone, sendbuffer, sizeof(sendbuffer)) || foundone;
- if(packurl && *packurl && strcmp(packurl, "-"))
- {
- strlcat(sendbuffer, "curl --pak --forthismap --as ", sizeof(sendbuffer));
- strlcat(sendbuffer, thispack, sizeof(sendbuffer));
- strlcat(sendbuffer, " --for ", sizeof(sendbuffer));
- strlcat(sendbuffer, req->filename, sizeof(sendbuffer));
- strlcat(sendbuffer, " ", sizeof(sendbuffer));
- strlcat(sendbuffer, packurl, sizeof(sendbuffer));
- strlcat(sendbuffer, thispack, sizeof(sendbuffer));
- strlcat(sendbuffer, "\n", sizeof(sendbuffer));
- }
- }
+ p = sv_curl_serverpackages.string;
+ while(COM_ParseToken_Simple(&p, false, false, true))
+ foundone = Curl_SendRequirement(com_token, foundone, sendbuffer, sizeof(sendbuffer)) || foundone;
- strlcat(sendbuffer, "curl --finish_autodownload\n", sizeof(sendbuffer));
+ if(foundone)
+ strlcat(sendbuffer, "curl --finish_autodownload\n", sizeof(sendbuffer));
if(strlen(sendbuffer) + 1 < sizeof(sendbuffer))
Host_ClientCommands("%s", sendbuffer);