fix: opti cleanup geodata
All checks were successful
/ build (map[dockerfile:./services/api/Dockerfile name:api]) (push) Successful in 2m41s
/ build (map[dockerfile:./services/web/Dockerfile name:web]) (push) Successful in 1m50s
/ build (map[dockerfile:./services/tasks/Dockerfile name:tasks]) (push) Successful in 2m24s
/ build (map[dockerfile:./services/app/Dockerfile name:app]) (push) Successful in 45s
/ build (map[dockerfile:./services/hasura/Dockerfile name:hasura]) (push) Successful in 1m3s
/ build (map[dockerfile:./services/watchers/Dockerfile name:watchers]) (push) Successful in 2m10s
/ build (map[dockerfile:./services/files/Dockerfile name:files]) (push) Successful in 2m31s
/ deploy (push) Successful in 14s
This commit is contained in:
parent dc351b2ed6
commit f63766314b

1 changed file with 55 additions and 103 deletions
@@ -21,61 +21,13 @@ module.exports = async function () {
  return async function geodataCleanupCron() {
    logger.info("watcher geodataCleanupCron: daemon started")

    // this is temporary function (fixing actual data)
    async function cleanupOrphanedHotGeodata() {
      // Get all devices from hot storage
      const hotDevices = new Set()
      let hotCursor = "0"
      do {
        // Use zscan to iterate through the sorted set
        const [newCursor, items] = await redisHot.zscan(
          HOTGEODATA_KEY,
          hotCursor,
          "COUNT",
          "100"
        )
        hotCursor = newCursor

        // Extract device IDs (every other item in the result is a score)
        for (let i = 0; i < items.length; i += 2) {
          hotDevices.add(items[i])
        }
      } while (hotCursor !== "0")

      // Process each hot device
      await async.eachLimit(
        [...hotDevices],
        MAX_PARALLEL_PROCESS,
        async (deviceId) => {
          try {
            // Check if device exists in cold storage
            const coldKey = `${COLDGEODATA_DEVICE_KEY_PREFIX}${deviceId}`
            const exists = await redisCold.exists(coldKey)

            // If device doesn't exist in cold storage, remove it from hot storage
            if (!exists) {
              await redisHot.zrem(HOTGEODATA_KEY, deviceId)
              logger.debug(
                { deviceId },
                "Removed orphaned device data from hot storage (not found in cold storage)"
              )
            }
          } catch (error) {
            logger.error(
              { error, deviceId },
              "Error checking orphaned device data"
            )
          }
        }
      )
    }

    // TODO optimize by removing memory accumulation (cursor iteration to make it scalable)
    // Process keys in batches to avoid memory accumulation
    async function cleanupOldGeodata() {
      const now = Math.floor(Date.now() / 1000) // Current time in seconds
      const coldKeys = new Set() // Store cold geodata keys
      let coldCursor = "0"

      do {
        // Get batch of keys using SCAN
        const [newCursor, keys] = await redisCold.scan(
          coldCursor,
          "MATCH",
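For context, the ZSCAN loop in this hunk follows the standard Redis cursor pattern. A minimal standalone sketch, assuming an ioredis client; the key name "geodata:hot" and the function name are hypothetical, not taken from this repository:

const Redis = require("ioredis")

// Collect every member of a sorted set without blocking Redis.
// ZSCAN replies with [nextCursor, items], where items alternates
// member, score, member, score, ...
async function collectSortedSetMembers(redis, key) {
  const members = new Set()
  let cursor = "0"
  do {
    const [nextCursor, items] = await redis.zscan(key, cursor, "COUNT", "100")
    cursor = nextCursor
    for (let i = 0; i < items.length; i += 2) {
      members.add(items[i])
    }
  } while (cursor !== "0") // cursor "0" signals the scan cycle is complete
  return members
}

// Hypothetical usage:
// const redis = new Redis()
// collectSortedSetMembers(redis, "geodata:hot").then((m) => console.log(m.size))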
@@ -84,13 +36,10 @@ module.exports = async function () {
          "100"
        )
        coldCursor = newCursor
        keys.forEach((key) => coldKeys.add(key))
      } while (coldCursor !== "0")

      await async.eachLimit(
        [...coldKeys],
        MAX_PARALLEL_PROCESS,
        async (key) => {
          // Process this batch of keys immediately
          if (keys.length > 0) {
            await async.eachLimit(keys, MAX_PARALLEL_PROCESS, async (key) => {
              const deviceId = key.slice(COLDGEODATA_DEVICE_KEY_PREFIX.length)

              try {
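Both cleanup passes cap their Redis fan-out with async.eachLimit. A minimal sketch of that bounded-concurrency pattern, assuming the caolan/async package; the limit of 5 and the device IDs are illustrative, not the repo's MAX_PARALLEL_PROCESS:

const async = require("async")

async function processDevices(deviceIds) {
  // At most 5 iteratee calls are in flight at once; the returned promise
  // rejects on the first iteratee error that is not caught inside it.
  await async.eachLimit(deviceIds, 5, async (deviceId) => {
    console.log("processing", deviceId) // stand-in for the per-device Redis work
  })
}

processDevices(["dev-1", "dev-2", "dev-3"])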
@@ -138,7 +87,10 @@ module.exports = async function () {
                )
              }
            } catch (error) {
              logger.error({ error, deviceId }, "Error cleaning device data")
              logger.error(
                { error, deviceId },
                "Error cleaning device data"
              )
            }
          }
        } catch (error) {
@@ -147,14 +99,14 @@ module.exports = async function () {
              "Error processing device data from cold storage"
            )
          }
        })
      }
      )
    } while (coldCursor !== "0")
  }

    // Schedule both cleanup functions to run periodically
    cron.schedule(CLEANUP_CRON, async () => {
      await cleanupOldGeodata()
      await cleanupOrphanedHotGeodata()
    })
  }
}
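The scheduling at the bottom of the diff is plain node-cron. A minimal sketch, assuming the node-cron package; the expression is a placeholder, since the actual CLEANUP_CRON value is configured elsewhere and not shown in this diff:

const cron = require("node-cron")

// "*/10 * * * *" (every ten minutes) is a placeholder for CLEANUP_CRON
cron.schedule("*/10 * * * *", async () => {
  // the real job awaits each cleanup pass in sequence, as in the diff above
  console.log("cleanup pass starting")
})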