Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 36 additions & 1 deletion include/clip_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,42 @@ class ClipCache {
public:
// Build a cache that may hold clipped geometry for any zoom below
// baseZoom (add() rejects entries at baseZoom itself, since a clip at
// the terminal zoom can never be reused).
// The cache is sharded threadNum * 16 ways, each shard with its own
// mutex, to reduce lock contention; shards are picked by objectID
// modulo the shard count.
ClipCache(size_t threadNum, unsigned int baseZoom):
baseZoom(baseZoom),
useFixedZoom(false),
fixedZoom(0),
clipCache(threadNum * 16),
clipCacheMutex(threadNum * 16),
clipCacheSize(threadNum * 16) {
}

// Fixed-zoom variant: only clips produced exactly at fixedZoom are
// stored (add() ignores every other zoom), and lookups for deeper
// zooms are served by walking the requested tile up to its fixedZoom
// ancestor (see get(), which halves x/y per level).
// Sharding is identical to the other constructor: threadNum * 16
// shards, each guarded by its own mutex, selected by objectID modulo
// the shard count.
ClipCache(size_t threadNum, unsigned int baseZoom, unsigned int fixedZoom):
baseZoom(baseZoom),
useFixedZoom(true),
fixedZoom(fixedZoom),
clipCache(threadNum * 16),
clipCacheMutex(threadNum * 16),
clipCacheSize(threadNum * 16) {
}

const std::shared_ptr<T> get(uint zoom, TileCoordinate x, TileCoordinate y, NodeID objectID) const{
if (useFixedZoom) {
if (zoom <= fixedZoom)
return nullptr;

while (zoom > fixedZoom) {
zoom--;
x /= 2;
y /= 2;
}

std::lock_guard<std::mutex> lock(clipCacheMutex[objectID % clipCacheMutex.size()]);
const auto& cache = clipCache[objectID % clipCache.size()];
const auto& rv = cache.find(std::make_tuple(zoom, TileCoordinates(x, y), objectID));
if (rv != cache.end())
return rv->second;

return nullptr;
}

// Look for a previously clipped version at z-1, z-2, ...

std::lock_guard<std::mutex> lock(clipCacheMutex[objectID % clipCacheMutex.size()]);
Expand All @@ -37,9 +67,12 @@ class ClipCache {
}

void add(const TileBbox& bbox, const NodeID objectID, const T& output) {
if (useFixedZoom && bbox.zoom != fixedZoom)
return;

// The point of caching is to reuse the clip, so caching at the terminal zoom is
// pointless.
if (bbox.zoom == baseZoom)
if (!useFixedZoom && bbox.zoom == baseZoom)
return;

std::shared_ptr<T> copy = std::make_shared<T>();
Expand Down Expand Up @@ -71,6 +104,8 @@ class ClipCache {

private:
// Deepest zoom the pipeline renders; add() refuses entries at this
// zoom when useFixedZoom is false, as they could never be reused.
unsigned int baseZoom;
// When true, the cache stores entries only at fixedZoom and get()
// resolves deeper tiles to their fixedZoom ancestor.
bool useFixedZoom;
unsigned int fixedZoom;
// Sharded storage: each shard maps (zoom, tile coords, object id) to
// a shared_ptr of the clipped geometry. Shard index = objectID % size.
std::vector<std::map<std::tuple<uint16_t, TileCoordinates, NodeID>, std::shared_ptr<T>>> clipCache;
// One mutex per shard; mutable so the const get() path can lock.
mutable std::vector<std::mutex> clipCacheMutex;
// Per-shard entry counts — presumably used for eviction/size limits
// in code outside this view; TODO confirm.
std::vector<size_t> clipCacheSize;
Expand Down
2 changes: 1 addition & 1 deletion src/tile_data.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ TileDataSource::TileDataSource(size_t threadNum, unsigned int indexZoom, bool in
linestringStores(threadNum),
multilinestringStores(threadNum),
multipolygonStores(threadNum),
multiPolygonClipCache(ClipCache<MultiPolygon>(threadNum, indexZoom)),
multiPolygonClipCache(ClipCache<MultiPolygon>(threadNum, indexZoom, std::min(indexZoom, static_cast<unsigned int>(CLUSTER_ZOOM)))),
multiLinestringClipCache(ClipCache<MultiLinestring>(threadNum, indexZoom))
{
// TileDataSource can only index up to zoom 14. The caller is responsible for
Expand Down
155 changes: 83 additions & 72 deletions src/tilemaker.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -348,9 +348,6 @@ int main(const int argc, const char* argv[]) {

// ---- Write out data

// Launch the pool with threadNum threads
boost::asio::thread_pool pool(options.threadNum);

// Mutex is hold when IO is performed
std::mutex io_mutex;

Expand Down Expand Up @@ -460,86 +457,101 @@ int main(const int argc, const char* argv[]) {
// Cluster tiles: breadth-first for z0..z5, depth-first for z6
sortTileCoordinates(config.baseZoom, options.threadNum, tileCoordinates);

std::size_t batchSize = 0;
for(std::size_t startIndex = 0; startIndex < tileCoordinates.size(); startIndex += batchSize) {
// Compute how many tiles should be assigned to this batch --
// higher-zoom tiles are cheaper to compute, lower-zoom tiles more expensive.
batchSize = 0;
size_t weight = 0;
while (weight < 1000 && startIndex + batchSize < tileCoordinates.size()) {
const auto& zoom = tileCoordinates[startIndex + batchSize].first;
if (zoom > 12)
weight++;
else if (zoom > 11)
weight += 10;
else if (zoom > 10)
weight += 100;
else
weight += 1000;

batchSize++;
}
auto writeTiles = [&](bool cachedTiles) {
boost::asio::thread_pool pool(options.threadNum);
std::size_t batchSize = 0;
for(std::size_t startIndex = 0; startIndex < tileCoordinates.size(); startIndex += batchSize) {
while (startIndex < tileCoordinates.size() && (tileCoordinates[startIndex].first <= CLUSTER_ZOOM) != cachedTiles)
startIndex++;

if (startIndex >= tileCoordinates.size())
break;

// Compute how many tiles should be assigned to this batch --
// higher-zoom tiles are cheaper to compute, lower-zoom tiles more expensive.
batchSize = 0;
size_t weight = 0;
while (weight < 1000 && startIndex + batchSize < tileCoordinates.size()) {
const auto& zoom = tileCoordinates[startIndex + batchSize].first;
if ((zoom <= CLUSTER_ZOOM) != cachedTiles)
break;

if (zoom > 12)
weight++;
else if (zoom > 11)
weight += 10;
else if (zoom > 10)
weight += 100;
else
weight += 1000;

batchSize++;
}

boost::asio::post(pool, [=, &tileCoordinates, &pool, &sharedData, &sources, &attributeStore, &io_mutex, &tilesWritten, &lastTilesWritten]() {
std::vector<std::string> tileTimings;
std::size_t endIndex = std::min(tileCoordinates.size(), startIndex + batchSize);
for(std::size_t i = startIndex; i < endIndex; ++i) {
unsigned int zoom = tileCoordinates[i].first;
TileCoordinates coords = tileCoordinates[i].second;
boost::asio::post(pool, [=, &tileCoordinates, &sharedData, &sources, &attributeStore, &io_mutex, &tilesWritten, &lastTilesWritten]() {
std::vector<std::string> tileTimings;
std::size_t endIndex = std::min(tileCoordinates.size(), startIndex + batchSize);
for(std::size_t i = startIndex; i < endIndex; ++i) {
unsigned int zoom = tileCoordinates[i].first;
TileCoordinates coords = tileCoordinates[i].second;

#ifdef CLOCK_MONOTONIC
timespec start, end;
if (options.logTileTimings)
clock_gettime(CLOCK_MONOTONIC, &start);
timespec start, end;
if (options.logTileTimings)
clock_gettime(CLOCK_MONOTONIC, &start);
#endif

std::vector<std::vector<OutputObjectID>> data;
for (auto source : sources) {
data.emplace_back(source->getObjectsForTile(sortOrders, zoom, coords));
}
outputProc(sharedData, sources, attributeStore, data, coords, zoom);
std::vector<std::vector<OutputObjectID>> data;
for (auto source : sources) {
data.emplace_back(source->getObjectsForTile(sortOrders, zoom, coords));
}
outputProc(sharedData, sources, attributeStore, data, coords, zoom);

#ifdef CLOCK_MONOTONIC
if (options.logTileTimings) {
clock_gettime(CLOCK_MONOTONIC, &end);
uint64_t tileNs = 1e9 * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
std::string output = "z" + std::to_string(zoom) + "/" + std::to_string(coords.x) + "/" + std::to_string(coords.y) + " took " + std::to_string(tileNs/1e6) + " ms";
tileTimings.push_back(output);
}
if (options.logTileTimings) {
clock_gettime(CLOCK_MONOTONIC, &end);
uint64_t tileNs = 1e9 * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
std::string output = "z" + std::to_string(zoom) + "/" + std::to_string(coords.x) + "/" + std::to_string(coords.y) + " took " + std::to_string(tileNs/1e6) + " ms";
tileTimings.push_back(output);
}
#endif
}
}

if (options.logTileTimings) {
const std::lock_guard<std::mutex> lock(io_mutex);
std::cout << std::endl;
for (const auto& output : tileTimings)
std::cout << output << std::endl;
}
if (options.logTileTimings) {
const std::lock_guard<std::mutex> lock(io_mutex);
std::cout << std::endl;
for (const auto& output : tileTimings)
std::cout << output << std::endl;
}

tilesWritten += (endIndex - startIndex);

if (io_mutex.try_lock()) {
uint64_t written = tilesWritten.load();

if (written >= lastTilesWritten + tileCoordinates.size() / 100 || ISATTY) {
lastTilesWritten = written;
// Show progress grouped by z6 (or lower)
size_t z = tileCoordinates[startIndex].first;
size_t x = tileCoordinates[startIndex].second.x;
size_t y = tileCoordinates[startIndex].second.y;
if (z > CLUSTER_ZOOM) {
x = x / (1 << (z - CLUSTER_ZOOM));
y = y / (1 << (z - CLUSTER_ZOOM));
z = CLUSTER_ZOOM;
tilesWritten += (endIndex - startIndex);

if (io_mutex.try_lock()) {
uint64_t written = tilesWritten.load();

if (written >= lastTilesWritten + tileCoordinates.size() / 100 || ISATTY) {
lastTilesWritten = written;
// Show progress grouped by z6 (or lower)
size_t z = tileCoordinates[startIndex].first;
size_t x = tileCoordinates[startIndex].second.x;
size_t y = tileCoordinates[startIndex].second.y;
if (z > CLUSTER_ZOOM) {
x = x / (1 << (z - CLUSTER_ZOOM));
y = y / (1 << (z - CLUSTER_ZOOM));
z = CLUSTER_ZOOM;
}
cout << "z" << z << "/" << x << "/" << y << ", writing tile " << written << " of " << tileCoordinates.size() << " \r" << std::flush;
}
cout << "z" << z << "/" << x << "/" << y << ", writing tile " << written << " of " << tileCoordinates.size() << " \r" << std::flush;
io_mutex.unlock();
}
io_mutex.unlock();
}
});
}
// Wait for all tasks in the pool to complete.
pool.join();
});
}
// Wait for all tasks in the pool to complete.
pool.join();
};

writeTiles(true);
writeTiles(false);

// ---- Close tileset

Expand All @@ -564,4 +576,3 @@ int main(const int argc, const char* argv[]) {

cout << endl << "Filled the tileset with good things at " << sharedData.outputFile << endl;
}