Try to reduce stutter when loading
@@ -75,7 +75,9 @@ public class RenderService<T extends AbstractSectionRenderer<J, ?>, J extends Vi
this.geometryUpdateQueue = new MessageQueue<>(this.nodeManager::processGeometryResult);

this.viewportSelector = new ViewportSelector<>(this.sectionRenderer::createViewport);
this.renderGen = new RenderGenerationService(world, this.modelService, serviceThreadPool, this.geometryUpdateQueue::push, this.sectionRenderer.getGeometryManager() instanceof IUsesMeshlets);
this.renderGen = new RenderGenerationService(world, this.modelService, serviceThreadPool,
this.geometryUpdateQueue::push, this.sectionRenderer.getGeometryManager() instanceof IUsesMeshlets,
()->this.geometryUpdateQueue.count()<2000);

router.setCallbacks(this.renderGen::enqueueTask, section -> {
section.acquire();
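The new trailing constructor argument, ()->this.geometryUpdateQueue.count()<2000, is a backpressure gate: render-generation workers only start another build while fewer than 2000 finished results are still waiting for the render thread to consume them. A minimal sketch of that gating pattern, using hypothetical names (ThrottledBuilder, buildSomething) rather than Voxy's actual classes:

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.function.BooleanSupplier;

final class ThrottledBuilder {
    private final ConcurrentLinkedDeque<Object> results = new ConcurrentLinkedDeque<>();
    // Workers only start another build while the consumer has fewer than 2000 results pending.
    private final BooleanSupplier taskLimiter = () -> this.results.size() < 2000;

    void workerStep() {
        if (!this.taskLimiter.getAsBoolean()) {
            return; // back off; the render thread is behind on consuming results
        }
        this.results.add(buildSomething());
    }

    private Object buildSomething() { return new Object(); } // placeholder work
}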
@@ -129,8 +131,11 @@ public class RenderService<T extends AbstractSectionRenderer<J, ?>, J extends Vi
DownloadStream.INSTANCE.tick();


this.sectionUpdateQueue.consume();
this.geometryUpdateQueue.consume();
this.sectionUpdateQueue.consume(128);

//Cap the number of consumed geometry updates per frame to 2% of the queue size, clamped between 100 and 200
int geoUpdateCap = Math.max(100, Math.min((int)(0.02*this.geometryUpdateQueue.count()), 200));
this.geometryUpdateQueue.consume(geoUpdateCap);
if (this.nodeManager.writeChanges(this.traversal.getNodeBuffer())) {//TODO: maybe move the node buffer out of the traversal class
UploadStream.INSTANCE.commit();
}
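Instead of draining both queues completely every frame, the consume calls are now bounded; the geometry cap scales with the backlog but never exceeds 200 applications per frame. A few illustrative values for the expression above:

// geoUpdateCap = Math.max(100, Math.min((int)(0.02 * queued), 200))
// queued =    500 -> (int)(0.02 * 500)    =   10 -> clamped up to 100
// queued =  8_000 -> (int)(0.02 * 8_000)  =  160 -> 160
// queued = 50_000 -> (int)(0.02 * 50_000) = 1000 -> clamped down to 200

This spreads the geometry upload cost across frames rather than spiking a single one when a large batch of sections finishes at once.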
@@ -185,12 +190,16 @@ public class RenderService<T extends AbstractSectionRenderer<J, ?>, J extends Vi
this.world.getMapper().setBiomeCallback(null);
this.world.getMapper().setStateCallback(null);

//Release all the unprocessed built geometry
this.geometryUpdateQueue.clear(BuiltSection::free);

this.modelService.shutdown();
this.renderGen.shutdown();
this.viewportSelector.free();
this.sectionRenderer.free();
this.traversal.free();
this.nodeCleaner.free();

//Release all the unprocessed built geometry
this.geometryUpdateQueue.clear(BuiltSection::free);
this.sectionUpdateQueue.clear(WorldSection::release);//Release anything that's in the queue
@@ -14,6 +14,7 @@ import me.cortex.voxy.common.thread.ServiceSlice;
import me.cortex.voxy.common.thread.ServiceThreadPool;

import java.util.List;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Supplier;
@@ -39,6 +40,10 @@ public class RenderGenerationService {


public RenderGenerationService(WorldEngine world, ModelBakerySubsystem modelBakery, ServiceThreadPool serviceThreadPool, Consumer<BuiltSection> consumer, boolean emitMeshlets) {
this(world, modelBakery, serviceThreadPool, consumer, emitMeshlets, ()->true);
}

public RenderGenerationService(WorldEngine world, ModelBakerySubsystem modelBakery, ServiceThreadPool serviceThreadPool, Consumer<BuiltSection> consumer, boolean emitMeshlets, BooleanSupplier taskLimiter) {
this.emitMeshlets = emitMeshlets;
this.world = world;
this.modelBakery = modelBakery;
@@ -50,7 +55,7 @@ public class RenderGenerationService {
return new Pair<>(() -> {
this.processJob(factory);
}, factory::free);
});
}, taskLimiter);
}

//NOTE: the biomes are always fully populated/kept up to date
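The old five-argument constructor survives as a thin overload that supplies an always-true limiter, so existing callers keep their behaviour while RenderService can pass its queue-depth check; the supplier is then handed down as the work condition of the service slice. A generic sketch of that default-overload pattern (Generator and mayStartTask are hypothetical names, not Voxy's API):

import java.util.function.BooleanSupplier;

final class Generator {
    private final BooleanSupplier taskLimiter;

    Generator() {
        this(() -> true); // default: never throttle
    }

    Generator(BooleanSupplier taskLimiter) {
        this.taskLimiter = taskLimiter;
    }

    boolean mayStartTask() {
        return this.taskLimiter.getAsBoolean();
    }
}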
@@ -166,6 +171,10 @@ public class RenderGenerationService {
this.threads.execute();
return new BuildTask(key);
});
//Prioritize lower detail builds
if (WorldEngine.getLevel(pos) > 2) {
this.taskQueue.getAndMoveToFirst(pos);
}
}
}
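getAndMoveToFirst implies the task queue is an insertion-ordered (linked) map that is drained from the front, so bumping a key to the head is a cheap way to prioritise coarse, low-detail builds over detailed ones. A small illustration of the same idea, assuming a fastutil linked map like the one that call suggests (the key/value choices are illustrative only):

import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;

final class MoveToFrontDemo {
    public static void main(String[] args) {
        var taskQueue = new Object2ObjectLinkedOpenHashMap<Long, String>();
        taskQueue.put(10L, "detailed LOD0 build"); // enqueued first
        taskQueue.put(20L, "coarse LOD3 build");   // enqueued second
        taskQueue.getAndMoveToFirst(20L);          // bump the coarse build to the head
        System.out.println(taskQueue.removeFirst()); // the coarse build is dequeued first
    }
}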
@@ -189,14 +198,19 @@ public class RenderGenerationService {

public void shutdown() {
//Steal and free as much work as possible
while (this.threads.steal()) {
while (this.threads.hasJobs()) {
int i = this.threads.drain();
if (i == 0) break;

synchronized (this.taskQueue) {
for (int j = 0; j < i; j++) {
var task = this.taskQueue.removeFirst();
if (task.section != null) {
task.section.release();
}
}
}
}

//Shutdown the threads
this.threads.shutdown();
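Shutdown now claims queued jobs in bulk with drain() and removes exactly that many tasks under the queue lock, releasing whatever each task still holds, so ref-counted sections are not leaked when the service dies mid-load. The general shape of that pairing, as a self-contained sketch (queue and free stand in for the real task queue and cleanup hook):

import java.util.ArrayDeque;
import java.util.concurrent.Semaphore;

final class DrainOnShutdown {
    // Claim every queued permit, then remove exactly that many tasks under the lock.
    static void drainAndFree(Semaphore jobCount, ArrayDeque<Runnable> queue, Runnable free) {
        while (true) {
            int claimed = jobCount.drainPermits();
            if (claimed == 0) break;              // nothing left that workers haven't taken
            synchronized (queue) {
                for (int j = 0; j < claimed; j++) {
                    queue.removeFirst();          // one queued task per claimed permit
                    free.run();                   // release whatever the task was holding
                }
            }
        }
    }
}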
@@ -139,6 +139,10 @@ public class ServiceSlice extends TrackedObject {
return this.jobCount.availablePermits() != 0;
}

boolean workConditionMet() {
return this.condition.getAsBoolean();
}

public void blockTillEmpty() {
while (this.activeCount.get() != 0 && this.alive) {
while (this.jobCount2.get() != 0 && this.alive) {
@@ -161,10 +165,23 @@ public class ServiceSlice extends TrackedObject {
if (this.jobCount2.decrementAndGet() < 0) {
throw new IllegalStateException("Job count negative!!!");
}
this.threadPool.steal(this);
this.threadPool.steal(this, 1);
return true;
}

public int drain() {
int count = this.jobCount.drainPermits();
if (count == 0) {
return 0;
}

if (this.jobCount2.addAndGet(-count) < 0) {
throw new IllegalStateException("Job count negative!!!");
}
this.threadPool.steal(this, count);
return count;
}

public boolean isAlive() {
return this.alive;
}
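drain() is built on Semaphore.drainPermits(), which atomically takes every available permit and reports how many it took, so two threads can never claim the same queued job twice. A tiny standalone check of that JDK behaviour:

import java.util.concurrent.Semaphore;

final class DrainPermitsDemo {
    public static void main(String[] args) {
        Semaphore jobCount = new Semaphore(0);
        jobCount.release(3);                    // three jobs queued
        int claimed = jobCount.drainPermits();  // atomically claims all -> 3
        int again = jobCount.drainPermits();    // nothing left -> 0
        System.out.println(claimed + " " + again); // prints "3 0"
    }
}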
@@ -127,9 +127,9 @@ public class ServiceThreadPool {
this.jobCounter.release(1);
}

void steal(ServiceSlice service) {
this.totalJobWeight.addAndGet(-service.weightPerJob);
this.jobCounter.acquireUninterruptibly(1);
void steal(ServiceSlice service, int count) {
this.totalJobWeight.addAndGet(-(service.weightPerJob*(long)count));
this.jobCounter.acquireUninterruptibly(count);
}

private void worker(int threadId) {
@@ -141,9 +141,17 @@ public class ServiceThreadPool {
break;
}

int attempts = 50;
final int ATTEMPT_COUNT = 50;
int attempts = ATTEMPT_COUNT;
outer:
while (true) {
if (attempts < ATTEMPT_COUNT-2) {
try {
Thread.sleep(20);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
var ref = this.serviceSlices;
if (ref.length == 0) {
Logger.error("Service worker tried to run but had 0 slices");
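The worker now retries slice selection up to ATTEMPT_COUNT times, spinning through the first couple of attempts and sleeping 20 ms between the rest, so an idle worker stops burning CPU when no slice is currently allowed to run. A condensed, self-contained sketch of that retry shape (tryOnce stands in for the selection body; this is not Voxy's actual code):

import java.util.function.BooleanSupplier;

final class BoundedRetry {
    static boolean runWithRetries(BooleanSupplier tryOnce) throws InterruptedException {
        final int ATTEMPT_COUNT = 50;
        int attempts = ATTEMPT_COUNT;
        while (true) {
            // First couple of attempts retry immediately; later ones sleep a bit.
            if (attempts < ATTEMPT_COUNT - 2) Thread.sleep(20);
            if (tryOnce.getAsBoolean()) return true; // found and ran some work
            if (attempts-- == 0) return false;       // give up after ~50 tries
        }
    }
}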
@@ -152,7 +160,7 @@ public class ServiceThreadPool {
if (attempts-- == 0) {
Logger.warn("Unable to execute service after many attempts, releasing");
try {
Thread.sleep(10);
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
@@ -169,13 +177,20 @@ public class ServiceThreadPool {
break;
}

ServiceSlice service = ref[(int) (clamped % ref.length)];
ServiceSlice service = ref[0];
for (int i = 0; i < ref.length; i++) {
service = ref[(int) ((clamped+i) % ref.length)];
if (service.workConditionMet()) {
break;
}
}

//1 in 64 chance to just pick a service that has a task, in a cycling manner; this keeps a single service from constantly crowding out all the others
if (((seed>>10)&63) == 0) {
for (int i = 0; i < ref.length; i++) {
int idx = (i+revolvingSelector)%ref.length;
var slice = ref[idx];
if (slice.hasJobs()) {
if (slice.hasJobs() && slice.workConditionMet()) {
service = slice;
revolvingSelector = (idx+1)%ref.length;
break;
@@ -186,7 +201,7 @@ public class ServiceThreadPool {
long chosenNumber = clamped % weight;
for (var slice : ref) {
chosenNumber -= ((long) slice.weightPerJob) * slice.jobCount.availablePermits();
if (chosenNumber <= 0) {
if (chosenNumber <= 0 && slice.workConditionMet()) {
service = slice;
break;
}
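The fallback selection is a weighted pick: each slice owns a share of the roll proportional to weightPerJob times its queued jobs, and with this change a slice is only accepted if its work condition currently allows it to run. A stripped-down sketch of that selection logic (array-based, names hypothetical):

final class WeightedPick {
    // roll is assumed to lie in [0, sum of weightTimesQueued)
    static int pick(long roll, long[] weightTimesQueued, boolean[] allowedToRun) {
        long remaining = roll;
        int chosen = 0;
        for (int i = 0; i < weightTimesQueued.length; i++) {
            remaining -= weightTimesQueued[i];        // walk through each slice's share
            if (remaining <= 0 && allowedToRun[i]) {  // rolled slice must also be allowed to run
                chosen = i;
                break;
            }
        }
        return chosen;
    }
}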
@@ -1,11 +1,13 @@
package me.cortex.voxy.common.util;

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class MessageQueue <T> {
private final Consumer<T> consumer;
private final ConcurrentLinkedDeque<T> queue = new ConcurrentLinkedDeque<>();
private final AtomicInteger count = new AtomicInteger(0);

public MessageQueue(Consumer<T> consumer) {
this.consumer = consumer;
@@ -13,6 +15,7 @@ public class MessageQueue <T> {

public void push(T obj) {
this.queue.add(obj);
this.count.addAndGet(1);
}

public int consume() {
@@ -23,10 +26,13 @@ public class MessageQueue <T> {
int i = 0;
while (i < max) {
var entry = this.queue.poll();
if (entry == null) return i;
if (entry == null) break;
i++;
this.consumer.accept(entry);
}
if (i != 0) {
this.count.addAndGet(-i);
}
return i;
}
@@ -36,4 +42,7 @@ public class MessageQueue <T> {
}
}

public int count() {
return this.count.get();
}
}
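MessageQueue is what decouples the builder threads from the render thread: workers push(), the render thread consumes a bounded number of entries per frame, and the new AtomicInteger makes count() cheap enough for the task limiter and the per-frame cap to poll. A hypothetical usage example (String in place of BuiltSection; the bounded consume overload is the one RenderService calls with 128):

import me.cortex.voxy.common.util.MessageQueue;

final class MessageQueueExample {
    public static void main(String[] args) {
        MessageQueue<String> results = new MessageQueue<>(r -> System.out.println("applied " + r));

        // Producer side (worker threads):
        results.push("section A");
        results.push("section B");

        // Consumer side (render thread, once per frame):
        int applied = results.consume(1); // applies "section A" only
        System.out.println(applied + " applied this frame, " + results.count() + " still queued");
    }
}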
@@ -25,7 +25,7 @@ public class VoxelIngestService {
private final ConcurrentLinkedDeque<IngestSection> ingestQueue = new ConcurrentLinkedDeque<>();

public VoxelIngestService(ServiceThreadPool pool) {
this.threads = pool.createServiceNoCleanup("Ingest service", 100, ()-> this::processJob);
this.threads = pool.createServiceNoCleanup("Ingest service", 1000, ()-> this::processJob);
}

private void processJob() {