Commit e6597a77 authored by jorlow@chromium.org

Throttle sync requests sent to the LocalStorage background thread

https://bugs.webkit.org/show_bug.cgi?id=34943

Reviewed by Darin Fisher.

Currently, once a second, LocalStorage takes all keys/values that have
been changed and sends them to a background thread to sync.  The problem
is that this background thread can get overwhelmed and stop being
responsive.  This means that if any other page tries to start using
LocalStorage (and thus initiates the initial import), it will block on
all the previous syncs completing.

To mitigate this, I'm adding code so that we never schedule another
sync task while one is still running.  In order to keep the sync
tasks from growing exponentially when they do take longer than the
storage sync interval, I've also added a basic rate limiter.  No effort
is made to ensure fairness or ordering of what gets synced, nor is there
any way for this rate to be changed, because most normal uses of
LocalStorage really shouldn't be hitting these kinds of limits anyway.

The only behavioral change that's observable in JavaScript is time-based,
and thus it's not practical to make new tests that aren't racy.  The
existing layout tests cover LocalStorage pretty well, though.
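
The throttling described above amounts to two rules: never schedule a new
sync while one is already in flight (unless a final, shutting-down sync
forces a flush), and hand the background thread at most a fixed number of
items per batch.  The following is a minimal standalone sketch of that
idea, not the WebKit code itself; the names (ThrottledSyncer,
kMaxItemsPerBatch, tryScheduleBatch) are illustrative assumptions.

// Minimal standalone sketch of the two throttling rules (assumed names, not WebKit code).
#include <iostream>
#include <map>
#include <mutex>
#include <string>

static const int kMaxItemsPerBatch = 100; // plays the role of the per-batch cap in the patch

struct ThrottledSyncer {
    std::mutex lock;
    bool syncInProgress = false;      // set by the background thread while it writes
    bool finalSyncScheduled = false;  // a shutdown flush overrides the throttle
    std::map<std::string, std::string> changedItems;     // filled on the main thread
    std::map<std::string, std::string> itemsPendingSync; // consumed by the background thread

    // Returns false if the caller should simply re-arm its timer and try again later.
    bool tryScheduleBatch() {
        std::lock_guard<std::mutex> guard(lock);
        // Rule 1: never start another sync while one is still running,
        // unless we're shutting down and must flush everything.
        if (syncInProgress && !finalSyncScheduled)
            return false;
        // Rule 2: move at most kMaxItemsPerBatch entries into the pending set;
        // whatever is left stays in changedItems for the next timer fire.
        int count = 0;
        auto it = changedItems.begin();
        while (it != changedItems.end() && (count < kMaxItemsPerBatch || finalSyncScheduled)) {
            itemsPendingSync[it->first] = it->second;
            it = changedItems.erase(it);
            ++count;
        }
        return true;
    }
};

int main() {
    ThrottledSyncer syncer;
    for (int i = 0; i < 250; ++i)
        syncer.changedItems["key" + std::to_string(i)] = "value";
    syncer.tryScheduleBatch();
    std::cout << syncer.itemsPendingSync.size() << " queued, "
              << syncer.changedItems.size() << " left for the next batch\n";
}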

* storage/StorageAreaSync.cpp:
(WebCore::StorageAreaSync::StorageAreaSync):
(WebCore::StorageAreaSync::scheduleFinalSync):
(WebCore::StorageAreaSync::syncTimerFired):
(WebCore::StorageAreaSync::performSync):
* storage/StorageAreaSync.h:

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@55523 268f45cc-cd09-0410-ab3c-d52691b4dbfc
storage/StorageAreaSync.cpp:

@@ -43,6 +43,10 @@ namespace WebCore {
 // Instead, queue up a batch of items to sync and actually do the sync at the following interval.
 static const double StorageSyncInterval = 1.0;
 
+// A sane limit on how many items we'll schedule to sync all at once. This makes it
+// much harder to starve the rest of LocalStorage and the OS's IO subsystem in general.
+static const int MaxiumItemsToSync = 100;
+
 PassRefPtr<StorageAreaSync> StorageAreaSync::create(PassRefPtr<StorageSyncManager> storageSyncManager, PassRefPtr<StorageAreaImpl> storageArea, String databaseIdentifier)
 {
     return adoptRef(new StorageAreaSync(storageSyncManager, storageArea, databaseIdentifier));
@@ -57,6 +61,7 @@ StorageAreaSync::StorageAreaSync(PassRefPtr<StorageSyncManager> storageSyncManag
     , m_databaseIdentifier(databaseIdentifier.crossThreadString())
     , m_clearItemsWhileSyncing(false)
     , m_syncScheduled(false)
+    , m_syncInProgress(false)
     , m_importComplete(false)
 {
     ASSERT(isMainThread());
@@ -92,8 +97,8 @@ void StorageAreaSync::scheduleFinalSync()
     }
     // FIXME: This is synchronous. We should do it on the background process, but
     // we should do it safely.
-    syncTimerFired(&m_syncTimer);
     m_finalSyncScheduled = true;
+    syncTimerFired(&m_syncTimer);
 }
 
 void StorageAreaSync::scheduleItemForSync(const String& key, const String& value)
@@ -131,20 +136,43 @@ void StorageAreaSync::syncTimerFired(Timer<StorageAreaSync>*)
 {
     ASSERT(isMainThread());
 
-    HashMap<String, String>::iterator it = m_changedItems.begin();
-    HashMap<String, String>::iterator end = m_changedItems.end();
-
+    bool partialSync = false;
     {
         MutexLocker locker(m_syncLock);
 
+        // Do not schedule another sync if we're still trying to complete the
+        // previous one. But, if we're shutting down, schedule it anyway.
+        if (m_syncInProgress && !m_finalSyncScheduled) {
+            ASSERT(!m_syncTimer.isActive());
+            m_syncTimer.startOneShot(StorageSyncInterval);
+            return;
+        }
+
         if (m_itemsCleared) {
             m_itemsPendingSync.clear();
             m_clearItemsWhileSyncing = true;
             m_itemsCleared = false;
         }
 
-        for (; it != end; ++it)
-            m_itemsPendingSync.set(it->first.crossThreadString(), it->second.crossThreadString());
+        HashMap<String, String>::iterator changed_it = m_changedItems.begin();
+        HashMap<String, String>::iterator changed_end = m_changedItems.end();
+        for (int count = 0; changed_it != changed_end; ++count, ++changed_it) {
+            if (count >= MaxiumItemsToSync && !m_finalSyncScheduled) {
+                partialSync = true;
+                break;
+            }
+            m_itemsPendingSync.set(changed_it->first.crossThreadString(), changed_it->second.crossThreadString());
+        }
+
+        if (partialSync) {
+            // We can't do the fast path of simply clearing all items, so we'll need to manually
+            // remove them one by one. Done under lock since m_itemsPendingSync is modified by
+            // the background thread.
+            HashMap<String, String>::iterator pending_it = m_itemsPendingSync.begin();
+            HashMap<String, String>::iterator pending_end = m_itemsPendingSync.end();
+            for (; pending_it != pending_end; ++pending_it)
+                m_changedItems.remove(pending_it->first);
+        }
 
         if (!m_syncScheduled) {
             m_syncScheduled = true;
@@ -157,11 +185,17 @@ void StorageAreaSync::syncTimerFired(Timer<StorageAreaSync>*)
         }
     }
 
-    // The following is balanced by the calls to disableSuddenTermination in the
-    // scheduleItemForSync, scheduleClear, and scheduleFinalSync functions.
-    enableSuddenTermination();
+    if (partialSync) {
+        // If we didn't finish syncing, then we need to finish the job later.
+        ASSERT(!m_syncTimer.isActive());
+        m_syncTimer.startOneShot(StorageSyncInterval);
+    } else {
+        // The following is balanced by the calls to disableSuddenTermination in the
+        // scheduleItemForSync, scheduleClear, and scheduleFinalSync functions.
+        enableSuddenTermination();
 
-    m_changedItems.clear();
+        m_changedItems.clear();
+    }
 }
 
 void StorageAreaSync::performImport()
@@ -319,10 +353,16 @@ void StorageAreaSync::performSync()
         m_clearItemsWhileSyncing = false;
         m_syncScheduled = false;
+        m_syncInProgress = true;
     }
 
     sync(clearItems, items);
 
+    {
+        MutexLocker locker(m_syncLock);
+        m_syncInProgress = false;
+    }
+
     // The following is balanced by the call to disableSuddenTermination in the
     // syncTimerFired function.
     enableSuddenTermination();
storage/StorageAreaSync.h:

@@ -84,6 +84,7 @@ namespace WebCore {
         HashMap<String, String> m_itemsPendingSync;
         bool m_clearItemsWhileSyncing;
         bool m_syncScheduled;
+        bool m_syncInProgress;
 
         mutable Mutex m_importLock;
         mutable ThreadCondition m_importCondition;
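
For completeness, here is a second simplified sketch (assumed standalone
code, not the actual WebCore classes) of the m_syncInProgress handshake the
diff introduces: the background sync sets the flag under the lock before its
slow write and clears it afterwards, while the once-a-second timer on the
main thread simply backs off and re-arms whenever it sees the flag set.

// Simplified illustration of the in-progress flag handshake (assumed names, not WebCore code).
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex syncLock;
bool syncInProgress = false;

// Runs on the background thread, analogous to StorageAreaSync::performSync().
void backgroundSync() {
    {
        std::lock_guard<std::mutex> guard(syncLock);
        syncInProgress = true;   // claim the "sync running" state before the slow write
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(50)); // stands in for the database write
    {
        std::lock_guard<std::mutex> guard(syncLock);
        syncInProgress = false;  // release it once the write is done
    }
}

// Runs on the main thread once a second, analogous to syncTimerFired().
// Returns true if it handed off a batch, false if it backed off to retry later.
bool timerFired() {
    std::lock_guard<std::mutex> guard(syncLock);
    if (syncInProgress)
        return false; // previous sync still running: re-arm the timer instead of piling on
    // ... batch up changed items and kick off backgroundSync() here ...
    return true;
}

int main() {
    std::thread background(backgroundSync);
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    std::cout << (timerFired() ? "batch handed off\n" : "backed off; will retry on the next tick\n");
    background.join();
}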