author    Jack Sangdahl <[email protected]>  2024-11-16 19:42:31 -0700
committer Jack Sangdahl <[email protected]>  2024-11-16 19:42:31 -0700
commit    17b2b82eba368c8407b05c5bdee6b7c29b8c6abe
tree      9941c6513d1f0f786c56fb3e6ca58b50e91ac5f4
parent    102d6be55e2e665b9c9c688725d1119a501139ef
Programming Assignment 3: Proxy
Signed-off-by: Jack Sangdahl <[email protected]>
 pa3_proxy/.gitignore |   4 +
 pa3_proxy/Makefile   |  11 +
 pa3_proxy/proxy.c    | 362 +
 pa3_proxy/proxy.h    |  49 +
 pa3_proxy/readme.md  | 154 +
 5 files changed, 580 insertions(+), 0 deletions(-)
diff --git a/pa3_proxy/.gitignore b/pa3_proxy/.gitignore
new file mode 100644
index 0000000..062250b
--- /dev/null
+++ b/pa3_proxy/.gitignore
@@ -0,0 +1,4 @@
+proxy
+*.o
+.cache
+blocklist
diff --git a/pa3_proxy/Makefile b/pa3_proxy/Makefile
new file mode 100644
index 0000000..f2181dc
--- /dev/null
+++ b/pa3_proxy/Makefile
@@ -0,0 +1,11 @@
+OBJS = proxy.o
+CFLAGS = -Wall -Wextra -g
+LDFLAGS = -lssl -lcrypto -lpthread
+
+all: $(OBJS)
+	cc $(CFLAGS) -o proxy $(OBJS) $(LDFLAGS)
+
+clean:
+	rm -f cache/* *.o proxy
+
+proxy.o: proxy.h
diff --git a/pa3_proxy/proxy.c b/pa3_proxy/proxy.c
new file mode 100644
index 0000000..a7f2885
--- /dev/null
+++ b/pa3_proxy/proxy.c
@@ -0,0 +1,362 @@
+// HTTP Caching Proxy
+// Jack Sangdahl
+// 28 October 2024
+#include "proxy.h"
+
+static int timeout;
+
+int main(int argc, char **argv) {
+  if (argc < 3) {
+    fprintf(stderr, "usage: %s <port number> <cache timeout>\n", argv[0]);
+    return EXIT_FAILURE;
+  }
+  signal(SIGINT, handle_sigint);
+  int port_num = atoi(argv[1]);
+  timeout = atoi(argv[2]);
+  struct stat st = {0};
+  if (stat(CACHE_DIRECTORY, &st) == -1)
+    mkdir(CACHE_DIRECTORY, 0700);
+  pthread_t tid;
+  pthread_attr_t attr;
+  if (pthread_attr_init(&attr)) {
+    perror("creating thread attributes failed");
+    return EXIT_FAILURE;
+  }
+  if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) {
+    perror("setting detached state failed");
+    return EXIT_FAILURE;
+  }
+  int sockfd = socket(AF_INET, SOCK_STREAM, 0);
+  if (sockfd < 0) {
+    perror("socket failed");
+    return EXIT_FAILURE;
+  }
+  printf("socket created\n");
+  struct sockaddr_in server_addr = {0}, client_addr;
+  int server_addr_len = sizeof(server_addr);
+  socklen_t client_addr_len = sizeof(client_addr);
+  server_addr.sin_family = AF_INET;
+  server_addr.sin_port = htons(port_num);
+  server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+  if (bind(sockfd, (struct sockaddr *)&server_addr, server_addr_len)) {
+    perror("binding failed");
+    return EXIT_FAILURE;
+  }
+  printf("socket bound to address\n");
+  if (listen(sockfd, SOMAXCONN)) {
+    perror("listening failed");
+    return EXIT_FAILURE;
+  }
+  printf("listening for connections...\n");
+  while (1) {
+    int *new_sockfd = (int *)malloc(sizeof(int));
+    *new_sockfd = accept(sockfd, (struct sockaddr *)&client_addr,
+                         &client_addr_len);
+    if (*new_sockfd < 0) {
+      perror("accepting client connection failed");
+      free(new_sockfd);
+      continue;
+    }
+    printf("new connection accepted\n");
+    if (pthread_create(&tid, &attr, handle_connection, (void *)new_sockfd)) {
+      free(new_sockfd);
+      perror("creating thread failed");
+    }
+  }
+  return EXIT_SUCCESS;
+}
+
+void *handle_connection(void *a) {
+  int connection = *(int *)a;
+  free(a);
+  struct sockaddr_in server_addr = {0};
+  server_addr.sin_family = AF_INET;
+  pthread_t prefetch_tid;
+  pthread_attr_t prefetch_attr;
+  if (pthread_attr_init(&prefetch_attr)) {
+    perror("creating prefetcher thread attributes failed");
+    pthread_exit(NULL);
+  }
+  if (pthread_attr_setdetachstate(&prefetch_attr, PTHREAD_CREATE_DETACHED)) {
+    perror("setting prefetcher detached state failed");
+    pthread_exit(NULL);
+  }
+  while (1) {
+    bool prefetch = false;
+    server_addr.sin_port = htons(80);
+    char client_proxy_data[DATA_SIZE] = {0};
+    char server_proxy_data[DATA_SIZE] = {0};
+    char proxy_client_data[DATA_SIZE];
+    char http_version[10], hostname[100], request_type[10], url[DATA_SIZE],
+        md5_checksum[33], cache_filename[50], extracted_ip[20], port_str[10],
+        cached_ip[20];
+    ssize_t bytes_recv =
+        recv(connection, client_proxy_data, sizeof(client_proxy_data) - 1, 0);
+    if (bytes_recv <= 0) {
+      printf("request complete\n");
+      break;
+    }
+    sscanf(client_proxy_data, "%9s %10239s %9s", request_type, url,
+           http_version);
+    if (!strcmp(request_type, "GET")) {
+      if (strstr(url, "https")) {
+        strcpy(proxy_client_data, RSP_400);
+        send(connection, proxy_client_data, strlen(proxy_client_data),
+             MSG_NOSIGNAL);
+        send(connection, MSG_400_URL, strlen(MSG_400_URL), MSG_NOSIGNAL);
+        continue;
+      }
+      if (file_cached(url)) {
+        printf("valid cache file found\n");
+        send_cached_file(url, connection);
+      } else {
+        calculate_md5sum(url, md5_checksum);
+        sprintf(cache_filename, "%s%s", CACHE_DIRECTORY, md5_checksum);
+        printf("no file found in cache\n");
+        sscanf(url, "%*[^/]%*c%*c%99[^/]", hostname);
+        printf("hostname: %s\n", hostname);
+        if (strchr(hostname, ':')) {
+          sscanf(hostname, "%19[^:]%*c%9[^/]", extracted_ip, port_str);
+          if (blacklisted(extracted_ip)) {
+            strcpy(proxy_client_data, RSP_403);
+            send(connection, proxy_client_data, strlen(proxy_client_data),
+                 MSG_NOSIGNAL);
+            send(connection, MSG_403, strlen(MSG_403), MSG_NOSIGNAL);
+            continue;
+          }
+          server_addr.sin_addr.s_addr = inet_addr(extracted_ip);
+          server_addr.sin_port = htons(atoi(port_str));
+        } else {
+          if (blacklisted(hostname)) {
+            strcpy(proxy_client_data, RSP_403);
+            send(connection, proxy_client_data, strlen(proxy_client_data),
+                 MSG_NOSIGNAL);
+            send(connection, MSG_403, strlen(MSG_403), MSG_NOSIGNAL);
+            continue;
+          }
+          if (hostname_cached(hostname, cached_ip))
+            server_addr.sin_addr.s_addr = inet_addr(cached_ip);
+          else {
+            struct hostent *server = gethostbyname(hostname);
+            if (!server) {
+              perror("getting server info failed");
+              continue;
+            }
+            memcpy((char *)&server_addr.sin_addr, server->h_addr,
+                   server->h_length);
+            cache_hostname(hostname, inet_ntoa(server_addr.sin_addr));
+          }
+        }
+        int server_sockfd = socket(AF_INET, SOCK_STREAM, 0);
+        if (server_sockfd < 0) {
+          perror("server socket failed");
+          continue;
+        }
+        if (connect(server_sockfd, (struct sockaddr *)&server_addr,
+                    sizeof(server_addr)) < 0 &&
+            errno != EISCONN) {
+          perror("connecting to host failed");
+          close(server_sockfd);
+          continue;
+        }
+        send(server_sockfd, client_proxy_data, strlen(client_proxy_data),
+             MSG_NOSIGNAL);
+        FILE *fp = fopen(cache_filename, "w");
+        if (!fp) {
+          perror("opening cache file failed");
+          close(server_sockfd);
+          continue;
+        }
+        time_t curr = time(NULL);
+        fprintf(fp, "%ld\n", (long)curr);
+        while ((bytes_recv = recv(server_sockfd, server_proxy_data,
                                  sizeof(server_proxy_data) - 1, 0)) > 0) {
+          if (strstr(server_proxy_data, "<html"))
+            prefetch = true;
+          printf("bytes received: %zd\n", bytes_recv);
+          fwrite(server_proxy_data, sizeof(char), bytes_recv, fp);
+          send(connection, server_proxy_data, bytes_recv, MSG_NOSIGNAL);
+          memset(server_proxy_data, 0, sizeof(server_proxy_data));
+        }
+        long file_size = ftell(fp);
+        fclose(fp);
+        close(server_sockfd);
+        if (file_size < 20)
+          remove(cache_filename);
+        if (prefetch) {
+          struct prefetcher_args *pf = malloc(sizeof(*pf));
+          strcpy(pf->filename, cache_filename);
+          strcpy(pf->hostname, hostname);
+          strcpy(pf->ipaddr, inet_ntoa(server_addr.sin_addr));
+          if (pthread_create(&prefetch_tid, &prefetch_attr, prefetcher,
+                             (void *)pf)) {
+            perror("creating prefetcher thread failed");
+            free(pf);
+          }
+        }
+      }
+    } else {
+      strcpy(proxy_client_data, RSP_400);
+      send(connection, proxy_client_data, strlen(proxy_client_data),
+           MSG_NOSIGNAL);
+      send(connection, MSG_400_METHOD, strlen(MSG_400_METHOD), MSG_NOSIGNAL);
+    }
+  }
+  close(connection);
+  pthread_exit(NULL);
+}
+
+void *prefetcher(void *a) {
+  struct prefetcher_args pf = *(struct prefetcher_args *)a;
+  free(a);
+  char html_filename[50];
+  char server_hostname[100];
+  char server_ipaddr[20];
+  strcpy(html_filename, pf.filename);
+  strcpy(server_hostname, pf.hostname);
+  strcpy(server_ipaddr, pf.ipaddr);
+  FILE *html_ptr = fopen(html_filename, "r");
+  if (!html_ptr) {
+    perror("opening html file failed");
+    pthread_exit(NULL);
+  }
+  struct sockaddr_in server_addr = {0};
+  server_addr.sin_family = AF_INET;
+  server_addr.sin_port = htons(80);
+  server_addr.sin_addr.s_addr = inet_addr(server_ipaddr);
+  char *line = NULL;
+  size_t length = 0;
+  while (getline(&line, &length, html_ptr) != -1) {
+    char cache_path[50], http_request[DATA_SIZE], recv_data[DATA_SIZE],
+        md5_url[33], full_url[207], rel_url[100],
+        *href_pos = strstr(line, "href");
+    if (!href_pos)
+      continue;
+    int server_sockfd = socket(AF_INET, SOCK_STREAM, 0);
+    if (server_sockfd < 0) {
+      perror("server socket failed");
+      break;
+    }
+    if (connect(server_sockfd, (struct sockaddr *)&server_addr,
+                sizeof(server_addr)) < 0 &&
+        errno != EISCONN) {
+      perror("connecting to host failed");
+      close(server_sockfd);
+      break;
+    }
+    char *url_start = strstr(line, "http://");
+    if (url_start)
+      sscanf(url_start, "%206[^\"]", full_url);
+    else {
+      sscanf(href_pos, "%*[^=]%*c%*c%99[^\"]", rel_url);
+      if (rel_url[0] == '/')
+        sprintf(full_url, "http://%s%s", server_hostname, rel_url);
+      else
+        sprintf(full_url, "http://%s/%s", server_hostname, rel_url);
+    }
+    sprintf(http_request, "GET %s HTTP/1.0\r\n\r\n", full_url);
+    calculate_md5sum(full_url, md5_url);
+    sprintf(cache_path, "%s%s", CACHE_DIRECTORY, md5_url);
+    send(server_sockfd, http_request, strlen(http_request), MSG_NOSIGNAL);
+    printf("cache path: %s\n", cache_path);
+    FILE *fp = fopen(cache_path, "w");
+    if (!fp) {
+      perror("opening cache file failed");
+      close(server_sockfd);
+      continue;
+    }
+    time_t curr = time(NULL);
+    fprintf(fp, "%ld\n", (long)curr);
+    ssize_t bytes_recv;
+    while ((bytes_recv =
+                recv(server_sockfd, recv_data, sizeof(recv_data), 0)) > 0) {
+      fwrite(recv_data, sizeof(char), bytes_recv, fp);
+      memset(recv_data, 0, sizeof(recv_data));
+    }
+    long file_size = ftell(fp);
+    fclose(fp);
+    if (file_size < 20)
+      remove(cache_path);
+    printf("prefetched link: %s\n", full_url);
+    close(server_sockfd);
+  }
+  free(line);
+  fclose(html_ptr);
+  printf("prefetching complete\n");
+  pthread_exit(NULL);
+}
+
+void calculate_md5sum(char *url, char *md5_checksum) {
+  EVP_MD_CTX *ctx = EVP_MD_CTX_new();
+  EVP_DigestInit_ex(ctx, EVP_md5(), NULL);
+  EVP_DigestUpdate(ctx, url, strlen(url));
+  uint md5_digest_len = EVP_MD_size(EVP_md5());
+  u_char *md5_digest =
+      (u_char *)OPENSSL_malloc(md5_digest_len * sizeof(u_char));
+  EVP_DigestFinal_ex(ctx, md5_digest, &md5_digest_len);
+  for (int i = 0; i < 16; ++i)
+    sprintf(md5_checksum + i * 2, "%02x", (uint)md5_digest[i]);
+  EVP_MD_CTX_free(ctx);
+  OPENSSL_free(md5_digest);
+}
+
+void send_cached_file(char *url, int connection) {
+  char md5_checksum[33];
+  char filename[50];
+  char data_read[DATA_SIZE];
+  size_t bytes_read;
+  char *line = NULL;
+  size_t length = 0;
+  calculate_md5sum(url, md5_checksum);
+  sprintf(filename, "%s%s", CACHE_DIRECTORY, md5_checksum);
+  FILE *fp = fopen(filename, "r");
+  if (!fp)
+    return;
+  if (getline(&line, &length, fp) == -1) {
+    fclose(fp);
+    return;
+  }
+  while ((bytes_read = fread(data_read, sizeof(char), sizeof(data_read), fp))) {
+    send(connection, data_read, bytes_read, MSG_NOSIGNAL);
+    memset(data_read, 0, sizeof(data_read));
+  }
+  free(line);
+  fclose(fp);
+}
+
+bool file_cached(char *url) {
+  char md5_checksum[33];
+  char filename[50];
+  time_t curr = time(NULL), begin, diff;
+  char *line = NULL;
+  size_t length = 0;
+  calculate_md5sum(url, md5_checksum);
+  sprintf(filename, "%s%s", CACHE_DIRECTORY, md5_checksum);
+  FILE *fp = fopen(filename, "r");
+  if (!fp)
+    return false;
+  if (getline(&line, &length, fp) == -1 || sscanf(line, "%ld", &begin) != 1)
+    begin = 0;
+  free(line);
+  fclose(fp);
+  diff = curr - begin;
+  if (diff > timeout) {
+    remove(filename);
+    return false;
+  }
+  return true;
+}
+
+bool blacklisted(char *ipaddr) {
+  FILE *block_list_ptr = fopen(BLOCKLIST_FILENAME, "a+");
+  if (!block_list_ptr)
+    return false;
+  char *line = NULL;
+  size_t length = 0;
+  bool found = false;
+  while (!found && getline(&line, &length, block_list_ptr) != -1)
+    if (strstr(line, ipaddr))
+      found = true;
+  free(line);
+  fclose(block_list_ptr);
+  return found;
+}
+
+void cache_hostname(char *hostname, char *ipaddr) {
+  FILE *fp = fopen(HOST_CACHE_FILENAME, "a+");
+  if (!fp)
+    return;
+  fprintf(fp, "%s,%s\n", hostname, ipaddr);
+  fclose(fp);
+}
+
+bool hostname_cached(char *hostname, char *ipaddr) {
+  strcpy(ipaddr, "none");
+  char *line = NULL;
+  size_t length = 0;
+  FILE *fp = fopen(HOST_CACHE_FILENAME, "a+");
+  if (!fp)
+    return false;
+  while (getline(&line, &length, fp) != -1)
+    if (strstr(line, hostname)) {
+      sscanf(line, "%*[^,]%*c%19s", ipaddr);
+      break;
+    }
+  free(line);
+  fclose(fp);
+  return strcmp(ipaddr, "none") != 0;
+}
+
+void handle_sigint(int sig) {
+  if (sig == SIGINT) {
+    printf("\nterminating proxy...\n");
+    exit(EXIT_SUCCESS);
+  }
+}
diff --git a/pa3_proxy/proxy.h b/pa3_proxy/proxy.h
new file mode 100644
index 0000000..d1d4f7d
--- /dev/null
+++ b/pa3_proxy/proxy.h
@@ -0,0 +1,49 @@
+// HTTP Caching Proxy
+// Jack Sangdahl
+// 28 October 2024
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <errno.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <openssl/evp.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#define DATA_SIZE 10240
+#define CACHE_DIRECTORY "./cache/"
+#define HOST_CACHE_FILENAME "./cache/hostnames"
+#define BLOCKLIST_FILENAME "./blocklist"
+#define RSP_400 "HTTP/1.1 400 Bad Request\r\nContent-Type: text/html\r\n\r\n"
+#define MSG_400_METHOD                                                         \
+  "<html><body>400 Bad Request Reason: Invalid Method:<<request "              \
+  "method>></body></html>\r\n\r\n"
+#define MSG_400_URL                                                            \
+  "<html><body>400 Bad Request Reason: Invalid URL: <<requested "              \
+  "url>></body></html>\r\n\r\n"
+#define RSP_403 "HTTP/1.1 403 Forbidden\r\nContent-Type: text/html\r\n\r\n"
+#define MSG_403 "<html><body>403 Blocked Website</body></html>\r\n\r\n"
+
+struct prefetcher_args {
+  char filename[50];
+  char hostname[100];
+  char ipaddr[20];
+};
+
+bool blacklisted(char *ipaddr);
+bool file_cached(char *url);
+bool hostname_cached(char *hostname, char *ipaddr);
+void *handle_connection(void *a);
+void *prefetcher(void *a);
+void calculate_md5sum(char *url, char *md5string);
+void send_cached_file(char *url, int connection);
+void cache_hostname(char *hostname, char *ipaddr);
+void handle_sigint(int sig);
diff --git a/pa3_proxy/readme.md b/pa3_proxy/readme.md
new file mode 100644
index 0000000..1b5b245
--- /dev/null
+++ b/pa3_proxy/readme.md
@@ -0,0 +1,154 @@
+## Proxy
+### Programming Assignment 3 - HTTP Caching Proxy
+
+In this assignment, you will build a proxy server that is capable of relaying HTTP
+requests from clients to HTTP servers. With the HTTP client-server protocol, the client
+usually connects directly to an HTTP server. Oftentimes it is useful to introduce an
+intermediate entity called a proxy. With a proxy, the browser instead sends an HTTP
+request to the proxy first. The proxy then forwards this request to the intended HTTP
+server, and then receives the reply from the HTTP server. The proxy finally forwards
+this reply to the requesting HTTP client.
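+
+For instance, with the proxy listening on port 8080 of the local machine (an assumed
+setup for illustration), an HTTP client such as curl can be told to relay its requests
+through it:
+```shell
+curl -x http://localhost:8080 http://example.com/
+```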
+
+There are several advantages to utilising a proxy:
+* **Performance**: If we cache a page that is frequently accessed by clients connected
+to the proxy, we can reduce the need to create new connections to the origin HTTP server
+for every request.
+* **Content Filtering**: The proxy can inspect URLs from the HTTP client and decide
+whether to block connections to some specific URLs, or to transform webpages (e.g. image
+transcoding for clients with limited processing capability).
+* **Privacy & Security**: When the origin HTTP server tries to log IP address
+information from the HTTP client, it can only get the information of the proxy but not
+the actual client. Forcing all web traffic through a proxy "checkpoint" can help network
+administrators centralise the detection of viruses and other threats.
+
+### The Proxy Server
+The program must take one command line argument: a port number for the server to use:
+```shell
+./proxy 8080 # running proxy with port number 8080
+```
+Your proxy must be able to handle multiple requests at the same time, such that multiple
+clients can access different servers with your proxy simultaneously. As with
+[PA2](../pa2_webserver), you may achieve this requirement through either a forking or
+multi-threaded implementation.
+
+Your proxy should run in an infinite loop once it has started. It is expected to exit
+gracefully when Ctrl + C is pressed. When an HTTP request comes in, your application
+should determine whether this request is properly formatted with the following criteria:
+* The HTTP method is valid. Our web proxy only needs to support `GET`
+* The HTTP server specified in the requested URL exists (usually by calling
+`gethostbyname()`)
+
+If the above criteria are not met, a corresponding error message will be sent to the
+HTTP client (e.g. `400 Bad Request`). Once a valid request is received, the proxy will
+parse the requested URL into the following 3 parts:
+1. Requested host and port number (you should use the default port 80 if no port number
+is specified)
+2. Requested path (used to access resources at the HTTP server)
+3. Optional message body (this may not appear in every HTTP request)
+
+If the requested host does not resolve to any IP address, then the error message
+`404 Not Found` must be returned to the client. You can do this right after a
+`gethostbyname()` call as this will return an IP address only for valid hostnames.
+
+The request received by the proxy will be identical to that received by an HTTP server.
+If we set our browser to use a proxy running on port 8000 and capture the `GET`
+request to [www.yahoo.com](http://www.yahoo.com), it would look something like the
+following:
+```shell
+$ nc -l 8000
+GET http://www.yahoo.com/ HTTP/1.1
+Host: www.yahoo.com
+Proxy-Connection: keep-alive
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
+Upgrade-Insecure-Requests: 1
+User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36
+DNT: 1
+Accept-Encoding: gzip, deflate, sdch
+Accept-Language: en-US,en;q=0.8
+Cookie: B=fgvem7ha9r7hv&b=3&s=0o; ucs=sfcTs=1425977267&sfc=1
+```
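+
+A malformed or unsupported request can be exercised by hand with `nc`. In a
+hypothetical session against a proxy on port 8080, a method other than `GET` should
+come back with the `400 Bad Request` page:
+```shell
+printf 'DELETE http://example.com/ HTTP/1.0\r\n\r\n' | nc localhost 8080
+```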
+
+### Forwarding requests to the HTTP server and relaying data
+After the proxy has parsed the information from the client, it constructs a new HTTP
+request and sends it over a second socket connection to the indicated HTTP server. The
+proxy will then forward the reply from the HTTP server back to the client. So you will
+need to manage two concurrent socket connections - one from the client to the proxy,
+and one from the proxy to the server. In summary:
+* Your proxy listens on the specified port number and accepts a connection
+* You will then parse the request received from this connection and check if the request
+is properly formatted
+* If properly formatted, your proxy will create a second socket and send a request to
+the specified HTTP server
+* Your proxy will then relay the results from the server to the client
+
+### Blocklist
+Create a file called `./blocklist` which has a list of websites and/or IP addresses that
+must be blocked. Thus, for every incoming request, the proxy must look up the requested
+host in `./blocklist` and deny requests for these websites or IPs. When a client
+requests one of them, your proxy should return `403 Forbidden`. This file should be
+formatted with one hostname or IP address per line:
+```
+www.google.com
+mail.yahoo.com
+154.240.28.35
+```
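+
+With the blocklist above in place, a request for a listed host should be answered with
+the 403 page instead of being relayed (hypothetical session, proxy on port 8080):
+```shell
+printf 'GET http://www.google.com/ HTTP/1.0\r\n\r\n' | nc localhost 8080
+```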
+
+### Caching
+Caching is one of the most common performance enhancements that web proxies implement.
+Caching takes advantage of the fact that most pages on the web don't change that often,
+and that any page that you visit once, you (or someone else using the same proxy) are
+likely to visit again. A caching proxy server saves a copy of the files that it
+retrieves from remote servers. When another request comes in for the same resource, it
+returns the saved copy instead of creating a new connection to a remote server. This
+can create more significant savings in the case of a more distant server or a remote
+server that is overloaded.
+
+Caching introduces a few new complexities as well. First of all, most web content is
+dynamically generated, and as such shouldn't really be cached. Second, we need to
+decide how long to keep pages in our cache. If the timeout is set too short, we negate
+most of the advantages of having a caching proxy. If the timeout is set too long, the
+client may end up looking at pages that are outdated or irrelevant.
+
+There are a few considerations that will affect the caching behaviour of the web proxy:
+1. **Timeout setting**: Alter your proxy so that you can specify a timeout value on the
+command line as a runtime option. For example, `./proxy 8080 60` will run the proxy
+with a timeout value of 60 seconds.
+2. **Page Cache**: You'll need to alter how your proxy retrieves pages. You should now
+check to see if a page exists in the cache before retrieving a page from a remote
+server. If there is a valid cached copy of the page, that should be presented to the
+client instead of creating a new server connection. You will need to create local
+files to store the cached pages.
+3. **Expiration**: You will need to implement cache expiration. The timing does not
+need to be exact (i.e. it's okay if a page is still in your cache after the timeout has
+expired, but it's not okay to serve an expired page from the cache). To keep things
+simple, we are not handling the cache-control header, which is sent by the server and
+specifies how long the client or the proxy should cache the content. Instead, we will
+use the timeout specified by the command line argument.
+4. **Dynamic content**: Many webpages have parameters embedded in the URL to specify
+that a page be dynamically generated. You must consider these parameters when
+determining whether or not to serve a previously cached page. You most likely want to
+avoid caching dynamic pages.
+
+Tip: when you check to see if a specific URL exists in the cache, you might want to
+compare hashes instead of the entire URL. For example, suppose that you store
+[http://www.yahoo.com/logo.jpg](http://www.yahoo.com/logo.jpg) with the hash key
+`0xAC10DE97073ACD81` using a hash function like `md5sum` of the URL. When you receive
+the same URL from the client, you can simply calculate the hash value of the requested
+URL and compare it with the hash keys stored in the cache. Keep cached files in a
+`./cache` directory.
+
+### Link Prefetch
+Building on top of your proxy and caching code, the last piece of functionality
+desired is called link prefetching. The idea behind prefetching is simple: if a user
+asks for a particular page, the odds are that they will next request a page linked from
+that page. Prefetching uses this information to attempt to speed up browsing by parsing
+requested pages for links, and then fetching the linked pages in the background. The
+pages fetched from the links are stored in the cache, ready to be served to the client
+when they are requested without the client having to wait around for the remote server
+to be contacted.
+
+Parsing and fetching links can take a significant amount of time, especially for a page
+with a lot of links. In order to fetch multiple links simultaneously in the background,
+you have to use multi-threading or forking. One thread/process should remain dedicated
+to the tasks that you have already implemented: reading requests from the client and
+serving pages from either the cache or a remote server. In a separate thread/process,
+the proxy will parse a page, extract the HTTP links, request those links from the
+remote server, and add them to the cache.
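+
+A simple end-to-end check of the cache is to request the same page twice and then
+inspect the cache directory (hypothetical session; the actual md5-named files will
+differ):
+```shell
+./proxy 8080 60 &
+curl -s -x http://localhost:8080 http://example.com/ -o /dev/null # miss: fetched and cached
+curl -s -x http://localhost:8080 http://example.com/ -o /dev/null # hit: served from ./cache
+ls cache/ # one md5-named file per cached URL, plus the hostnames lookup file
+```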