Tidepool: cleanup, send data older than 3h
parent 8c0eefac23
commit 50c1234630
8 changed files with 123 additions and 120 deletions
@@ -8,6 +8,7 @@ import androidx.room.PrimaryKey
import info.nightscout.database.entities.embedments.InterfaceIDs
import info.nightscout.database.entities.interfaces.DBEntryWithTimeAndDuration
import info.nightscout.database.entities.interfaces.TraceableDBEntry
import info.nightscout.database.entities.interfaces.end
import java.util.TimeZone

@Entity(
@@ -53,9 +54,6 @@ data class OfflineEvent(
            previous.interfaceIDs.nightscoutId == null &&
            interfaceIDs.nightscoutId != null

-   fun isRecordDeleted(other: OfflineEvent): Boolean =
-       isValid && !other.isValid
-
    enum class Reason {
        DISCONNECT_PUMP,
        SUSPEND,

@@ -8,7 +8,7 @@ import okhttp3.Response
import okio.Buffer
import java.io.IOException

-class InfoInterceptor(val tag: String = "interceptor", val aapsLogger: AAPSLogger) : Interceptor {
+class InfoInterceptor(val aapsLogger: AAPSLogger) : Interceptor {

    @Throws(IOException::class)
    override fun intercept(chain: Interceptor.Chain): Response {

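Note: the only change here is dropping the unused `tag` constructor parameter; the interceptor's behaviour is untouched. For orientation, a minimal OkHttp interceptor of this shape could look like the sketch below (the body and log wording are illustrative, not copied from the repository; the AAPSLogger/LTag import paths are assumptions):

    import info.nightscout.rx.logging.AAPSLogger // package path assumed
    import info.nightscout.rx.logging.LTag       // package path assumed
    import okhttp3.Interceptor
    import okhttp3.Response
    import java.io.IOException

    class InfoInterceptor(val aapsLogger: AAPSLogger) : Interceptor {

        @Throws(IOException::class)
        override fun intercept(chain: Interceptor.Chain): Response {
            val request = chain.request()
            // log the outgoing request body size, then pass the call on unchanged
            request.body?.let {
                aapsLogger.debug(LTag.TIDEPOOL, "Body size: ${it.contentLength()} bytes")
            }
            return chain.proceed(request)
        }
    }
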
@@ -4,38 +4,37 @@ import info.nightscout.plugins.sync.tidepool.messages.AuthReplyMessage
import info.nightscout.plugins.sync.tidepool.messages.DatasetReplyMessage
import okhttp3.Headers

-class Session(val authHeader: String?,
-              private val sessionTokenHeader: String,
-              val service: TidepoolApiService?) {
+class Session(
+    val authHeader: String?,
+    private val sessionTokenHeader: String,
+    val service: TidepoolApiService?
+) {

    internal var token: String? = null
    internal var authReply: AuthReplyMessage? = null
    internal var datasetReply: DatasetReplyMessage? = null
    internal var start: Long = 0
    internal var end: Long = 0

    @Volatile
    internal var iterations: Int = 0

    fun populateHeaders(headers: Headers) {
        if (this.token == null) {
-           this.token = headers.get(sessionTokenHeader)
+           this.token = headers[sessionTokenHeader]
        }
    }

    fun populateBody(obj: Any?) {
        if (obj == null) return
-       if (obj is AuthReplyMessage) {
-           authReply = obj
-       } else if (obj is List<*>) {
-           val list = obj as? List<*>?
+       when (obj) {
+           is AuthReplyMessage -> authReply = obj

-           list?.getOrNull(0)?.let {
-               if (it is DatasetReplyMessage) {
-                   datasetReply = it
+           is List<*> ->
+               (obj as? List<*>?)?.getOrNull(0)?.let {
+                   if (it is DatasetReplyMessage) datasetReply = it
                }
            }
-       } else if (obj is DatasetReplyMessage) {
-           datasetReply = obj

+           is DatasetReplyMessage -> datasetReply = obj
        }
    }
}

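Reassembling just the added lines of the hunk above, the new populateBody collapses the if/else-if chain into a single when expression (indentation and the comments below are added for readability; they are not part of the diff):

    fun populateBody(obj: Any?) {
        if (obj == null) return
        when (obj) {
            // login reply carries the user id needed for later calls
            is AuthReplyMessage    -> authReply = obj
            // "open datasets" returns a list; only the first entry matters here
            is List<*>             ->
                (obj as? List<*>?)?.getOrNull(0)?.let {
                    if (it is DatasetReplyMessage) datasetReply = it
                }
            // opening a dataset returns a single reply object
            is DatasetReplyMessage -> datasetReply = obj
        }
    }
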
|
@ -8,7 +8,14 @@ import retrofit2.Call
|
|||
import retrofit2.Callback
|
||||
import retrofit2.Response
|
||||
|
||||
internal class TidepoolCallback<T>(private val aapsLogger: AAPSLogger, private val rxBus: RxBus, private val session: Session, val name: String, val onSuccess: () -> Unit, val onFail: () -> Unit) :
|
||||
internal class TidepoolCallback<T>(
|
||||
private val aapsLogger: AAPSLogger,
|
||||
private val rxBus: RxBus,
|
||||
private val session: Session,
|
||||
private val name: String,
|
||||
private val onSuccess: () -> Unit,
|
||||
private val onFail: () -> Unit
|
||||
) :
|
||||
Callback<T> {
|
||||
|
||||
override fun onResponse(call: Call<T>, response: Response<T>) {
|
||||
|
|
|
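The reflow above is purely cosmetic: TidepoolCallback still wraps Retrofit's Callback<T> and forwards the outcome to the onSuccess/onFail lambdas that the uploader passes in. A hedged sketch of that dispatch pattern (not the repository's exact implementation; the log wording is invented for illustration):

    override fun onResponse(call: Call<T>, response: Response<T>) {
        if (response.isSuccessful) {
            // let the session capture the token header and the typed body
            session.populateHeaders(response.headers())
            session.populateBody(response.body())
            onSuccess()
        } else {
            aapsLogger.error("$name returned HTTP ${response.code()}")
            onFail()
        }
    }

    override fun onFailure(call: Call<T>, t: Throwable) {
        aapsLogger.error("$name failed: ${t.message}")
        onFail()
    }
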
@@ -71,7 +71,7 @@ class TidepoolUploader @Inject constructor(

        val client = OkHttpClient.Builder()
            .addInterceptor(httpLoggingInterceptor)
-           .addInterceptor(InfoInterceptor(TidepoolUploader::class.java.name, aapsLogger))
+           .addInterceptor(InfoInterceptor(aapsLogger))
            .build()

        retrofit = Retrofit.Builder()
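For context, this client is then handed to the Retrofit builder a few lines below the hunk. A generic sketch of that wiring (the base URL and converter below are placeholders, not values taken from the diff):

    val client = OkHttpClient.Builder()
        .addInterceptor(httpLoggingInterceptor)      // wire-level logging
        .addInterceptor(InfoInterceptor(aapsLogger)) // app-level request info, no per-instance tag any more
        .build()

    val retrofit = Retrofit.Builder()
        .baseUrl("https://example.invalid/")         // placeholder; the real Tidepool base URL comes from preferences
        .client(client)
        .addConverterFactory(GsonConverterFactory.create())
        .build()
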
@@ -114,12 +114,15 @@ class TidepoolUploader @Inject constructor(
            rxBus.send(EventTidepoolStatus(("Connecting")))
            val call = session?.service?.getLogin(authHeader)

-           call?.enqueue(TidepoolCallback<AuthReplyMessage>(aapsLogger, rxBus, session!!, "Login", {
-               startSession(session!!, doUpload)
-           }, {
-               connectionStatus = ConnectionStatus.FAILED
-               releaseWakeLock()
-           }))
+           call?.enqueue(TidepoolCallback<AuthReplyMessage>(
+               aapsLogger, rxBus, session!!, "Login",
+               {
+                   startSession(session!!, doUpload)
+               }, {
+                   connectionStatus = ConnectionStatus.FAILED
+                   releaseWakeLock()
+               })
+           )
            return
        } else {
            aapsLogger.debug(LTag.TIDEPOOL, "Cannot do login as user credentials have not been set correctly")
@@ -135,15 +138,14 @@ class TidepoolUploader @Inject constructor(
        session.authHeader?.let {
            val call = session.service?.getLogin(it)

-           call?.enqueue(TidepoolCallback<AuthReplyMessage>(aapsLogger, rxBus, session, "Login", {
-               OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Successfully logged into Tidepool.")
-           }, {
-               OKDialog.show(
-                   rootContext,
-                   rh.gs(R.string.tidepool),
-                   "Failed to log into Tidepool.\nCheck that your user name and password are correct."
-               )
-           }))
+           call?.enqueue(TidepoolCallback<AuthReplyMessage>(
+               aapsLogger, rxBus, session, "Login",
+               {
+                   OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Successfully logged into Tidepool.")
+               }, {
+                   OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Failed to log into Tidepool.\nCheck that your user name and password are correct.")
+               })
+           )
        }
            ?: OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Cannot do login as user credentials have not been set correctly")

@@ -158,39 +160,41 @@ class TidepoolUploader @Inject constructor(
                session.authReply!!.userid!!, "AAPS", 1
            )

            datasetCall.enqueue(TidepoolCallback<List<DatasetReplyMessage>>(aapsLogger, rxBus, session, "Get Open Datasets", {
                if (session.datasetReply == null) {
                    rxBus.send(EventTidepoolStatus(("Creating new dataset")))
                    val call = session.service.openDataSet(
                        session.token!!, session.authReply!!.userid!!,
                        OpenDatasetRequestMessage(config, dateUtil).getBody()
                    )
                    call.enqueue(TidepoolCallback<DatasetReplyMessage>(aapsLogger, rxBus, session, "Open New Dataset", {
                        connectionStatus = ConnectionStatus.CONNECTED
                        rxBus.send(EventTidepoolStatus(("New dataset OK")))
                        if (doUpload) doUpload()
                        else
                            releaseWakeLock()
            datasetCall.enqueue(
                TidepoolCallback<List<DatasetReplyMessage>>(
                    aapsLogger, rxBus, session, "Get Open Datasets",
                    {
                        if (session.datasetReply == null) {
                            rxBus.send(EventTidepoolStatus(("Creating new dataset")))
                            val call = session.service.openDataSet(session.token!!, session.authReply!!.userid!!, OpenDatasetRequestMessage(config, dateUtil).getBody())
                            call.enqueue(TidepoolCallback<DatasetReplyMessage>(
                                aapsLogger, rxBus, session, "Open New Dataset",
                                {
                                    connectionStatus = ConnectionStatus.CONNECTED
                                    rxBus.send(EventTidepoolStatus(("New dataset OK")))
                                    if (doUpload) doUpload()
                                    else releaseWakeLock()
                                }, {
                                    rxBus.send(EventTidepoolStatus(("New dataset FAILED")))
                                    connectionStatus = ConnectionStatus.FAILED
                                    releaseWakeLock()
                                })
                            )
                        } else {
                            aapsLogger.debug(LTag.TIDEPOOL, "Existing Dataset: " + session.datasetReply!!.getUploadId())
                            // TODO: Wouldn't need to do this if we could block on the above `call.enqueue`.
                            // ie, do the openDataSet conditionally, and then do `doUpload` either way.
                            connectionStatus = ConnectionStatus.CONNECTED
                            rxBus.send(EventTidepoolStatus(("Appending to existing dataset")))
                            if (doUpload) doUpload()
                            else releaseWakeLock()
                        }
                    }, {
                        rxBus.send(EventTidepoolStatus(("New dataset FAILED")))
                        connectionStatus = ConnectionStatus.FAILED
                        releaseWakeLock()
                    }))
                } else {
                    aapsLogger.debug(LTag.TIDEPOOL, "Existing Dataset: " + session.datasetReply!!.getUploadId())
                    // TODO: Wouldn't need to do this if we could block on the above `call.enqueue`.
                    // ie, do the openDataSet conditionally, and then do `doUpload` either way.
                    connectionStatus = ConnectionStatus.CONNECTED
                    rxBus.send(EventTidepoolStatus(("Appending to existing dataset")))
                    if (doUpload) doUpload()
                    else
                        connectionStatus = ConnectionStatus.FAILED
                        rxBus.send(EventTidepoolStatus(("Open dataset FAILED")))
                        releaseWakeLock()
                }
            }, {
                connectionStatus = ConnectionStatus.FAILED
                rxBus.send(EventTidepoolStatus(("Open dataset FAILED")))
                releaseWakeLock()
            }))
            })
            )
        } else {
            aapsLogger.error("Got login response but cannot determine userId - cannot proceed")
            connectionStatus = ConnectionStatus.FAILED
@@ -234,16 +238,20 @@ class TidepoolUploader @Inject constructor(
            rxBus.send(EventTidepoolStatus(("Uploading")))
            if (session.service != null && session.token != null && session.datasetReply != null) {
                val call = session.service.doUpload(session.token!!, session.datasetReply!!.getUploadId()!!, body)
-               call.enqueue(TidepoolCallback<UploadReplyMessage>(aapsLogger, rxBus, session, "Data Upload", {
-                   uploadChunk.setLastEnd(session.end)
-                   rxBus.send(EventTidepoolStatus(("Upload completed OK")))
-                   releaseWakeLock()
-                   uploadNext()
-               }, {
-                   connectionStatus = ConnectionStatus.DISCONNECTED
-                   rxBus.send(EventTidepoolStatus(("Upload FAILED")))
-                   releaseWakeLock()
-               }))
+               call.enqueue(TidepoolCallback<UploadReplyMessage>(
+                   aapsLogger, rxBus, session, "Data Upload",
+                   {
+                       uploadChunk.setLastEnd(session.end)
+                       connectionStatus = ConnectionStatus.CONNECTED
+                       rxBus.send(EventTidepoolStatus(("Upload completed OK")))
+                       releaseWakeLock()
+                       uploadNext()
+                   }, {
+                       connectionStatus = ConnectionStatus.DISCONNECTED
+                       rxBus.send(EventTidepoolStatus(("Upload FAILED")))
+                       releaseWakeLock()
+                   })
+               )
            }
        }
    }

@@ -256,7 +264,7 @@ class TidepoolUploader @Inject constructor(
            aapsLogger.debug(LTag.TIDEPOOL, "Blocked by connectivity settings")
            return
        }
-       if (uploadChunk.getLastEnd() < dateUtil.now() - T.mins(1).msecs()) {
+       if (uploadChunk.getLastEnd() < dateUtil.now() - T.hours(3).msecs() - T.mins(1).msecs()) {
            SystemClock.sleep(3000)
            aapsLogger.debug(LTag.TIDEPOOL, "Restarting doUpload. Last: " + dateUtil.dateAndTimeString(uploadChunk.getLastEnd()))
            doUpload()
@@ -267,15 +275,18 @@ class TidepoolUploader @Inject constructor(
        if (session?.datasetReply?.id != null) {
            extendWakeLock(60000)
            val call = session!!.service?.deleteDataSet(session!!.token!!, session!!.datasetReply!!.id!!)
-           call?.enqueue(TidepoolCallback(aapsLogger, rxBus, session!!, "Delete Dataset", {
-               connectionStatus = ConnectionStatus.DISCONNECTED
-               rxBus.send(EventTidepoolStatus(("Dataset removed OK")))
-               releaseWakeLock()
-           }, {
-               connectionStatus = ConnectionStatus.DISCONNECTED
-               rxBus.send(EventTidepoolStatus(("Dataset remove FAILED")))
-               releaseWakeLock()
-           }))
+           call?.enqueue(TidepoolCallback(
+               aapsLogger, rxBus, session!!, "Delete Dataset",
+               {
+                   connectionStatus = ConnectionStatus.DISCONNECTED
+                   rxBus.send(EventTidepoolStatus(("Dataset removed OK")))
+                   releaseWakeLock()
+               }, {
+                   connectionStatus = ConnectionStatus.DISCONNECTED
+                   rxBus.send(EventTidepoolStatus(("Dataset remove FAILED")))
+                   releaseWakeLock()
+               })
+           )
        } else {
            aapsLogger.error("Got login response but cannot determine datasetId - cannot proceed")
        }
@@ -292,15 +303,19 @@ class TidepoolUploader @Inject constructor(
            requireNotNull(userId)
            extendWakeLock(60000)
            val call = session.service?.deleteAllData(token, userId)
-           call?.enqueue(TidepoolCallback(aapsLogger, rxBus, session, "Delete all data", {
-               connectionStatus = ConnectionStatus.DISCONNECTED
-               rxBus.send(EventTidepoolStatus(("All data removed OK")))
-               releaseWakeLock()
-           }, {
-               connectionStatus = ConnectionStatus.DISCONNECTED
-               rxBus.send(EventTidepoolStatus(("All data remove FAILED")))
-               releaseWakeLock()
-           }))
+           call?.enqueue(
+               TidepoolCallback(
+                   aapsLogger, rxBus, session, "Delete all data",
+                   {
+                       connectionStatus = ConnectionStatus.DISCONNECTED
+                       rxBus.send(EventTidepoolStatus(("All data removed OK")))
+                       releaseWakeLock()
+                   }, {
+                       connectionStatus = ConnectionStatus.DISCONNECTED
+                       rxBus.send(EventTidepoolStatus(("All data remove FAILED")))
+                       releaseWakeLock()
+                   })
+           )
        } catch (e: IllegalArgumentException) {
            aapsLogger.error("Got login response but cannot determine userId - cannot proceed")
        }
@@ -321,13 +336,12 @@ class TidepoolUploader @Inject constructor(
    @Synchronized
    private fun releaseWakeLock() {
        wl?.let {
-           if (it.isHeld) {
+           if (it.isHeld)
                try {
                    it.release()
                } catch (e: Exception) {
                    aapsLogger.error("Error releasing wakelock: $e")
                }
-           }
        }
    }

@@ -41,21 +41,21 @@ class UploadChunk @Inject constructor(
    private val maxUploadSize = T.days(7).msecs() // don't change this

    fun getNext(session: Session?): String? {
-       if (session == null)
-           return null
+       session ?: return null

        session.start = getLastEnd()
-       session.end = min(session.start + maxUploadSize, dateUtil.now())
+       // do not upload last 3h, TBR can be still running
+       session.end = min(session.start + maxUploadSize, dateUtil.now() - T.hours(3).msecs())

        val result = get(session.start, session.end)
        if (result.length < 3) {
            aapsLogger.debug(LTag.TIDEPOOL, "No records in this time period, setting start to best end time")
-           setLastEnd(max(session.end, getOldestRecordTimeStamp()))
+           setLastEnd(session.end)
        }
        return result
    }

-   operator fun get(start: Long, end: Long): String {
+   fun get(start: Long, end: Long): String {

        aapsLogger.debug(LTag.TIDEPOOL, "Syncing data between: " + dateUtil.dateAndTimeString(start) + " -> " + dateUtil.dateAndTimeString(end))
        if (end <= start) {
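The changed lines in getNext above carry the commit's main behavioural change: the upload window now ends three hours before "now", so records that may still change (for example a temporary basal that is still running) are held back until a later cycle. A condensed restatement of that windowing logic follows (not code from the repository; min comes from kotlin.math, the other helpers appear in the hunk above):

    // window start: where the previous upload stopped
    val start = getLastEnd()
    // window end: at most 7 days of data, and never closer than 3 h to "now"
    val end = min(start + T.days(7).msecs(), dateUtil.now() - T.hours(3).msecs())
    // an empty window means nothing is old enough to upload yet
    val hasDataToUpload = end > start
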
@@ -94,21 +94,6 @@ class UploadChunk @Inject constructor(
        }
    }

-   // numeric limits must match max time windows
-
-   private fun getOldestRecordTimeStamp(): Long {
-       // TODO we could make sure we include records older than the first bg record for completeness
-
-       val start: Long = 0
-       val end = dateUtil.now()
-
-       val bgReadingList = repository.compatGetBgReadingsDataFromTime(start, end, true)
-           .blockingGet()
-       return if (bgReadingList.isNotEmpty())
-           bgReadingList[0].timestamp
-       else -1
-   }
-
    private fun getTreatments(start: Long, end: Long): List<BaseElement> {
        val result = LinkedList<BaseElement>()
        repository.getBolusesDataFromTimeToTime(start, end, true)

@@ -7,7 +7,7 @@ import info.nightscout.interfaces.profile.Profile
import info.nightscout.shared.utils.DateUtil
import java.util.UUID

-class BasalElement(tbr: TemporaryBasal, private val profile: Profile, dateUtil: DateUtil)
+class BasalElement(tbr: TemporaryBasal, profile: Profile, dateUtil: DateUtil)
    : BaseElement(tbr.timestamp, UUID.nameUUIDFromBytes(("AAPS-basal" + tbr.timestamp).toByteArray()).toString(), dateUtil) {

    internal var timestamp: Long = 0 // not exposed

@@ -11,7 +11,7 @@
    <string name="key_ns_device_status_last_synced_id" translatable="false">ns_device_status_last_synced_id</string>
    <string name="key_ns_temporary_basal_last_synced_id" translatable="false">ns_temporary_basal_last_synced_id</string>
    <string name="key_ns_extended_bolus_last_synced_id" translatable="false">ns_extended_bolus_last_synced_id</string>
-   <string name="key_ns_profile_switch_last_synced_id" translatable="false">profile_switch_last_synced_id</string>
+   <string name="key_ns_profile_switch_last_synced_id" translatable="false">ns_profile_switch_last_synced_id</string>
    <string name="key_ns_effective_profile_switch_last_synced_id" translatable="false">ns_effective_profile_switch_last_synced_id</string>
    <string name="key_ns_offline_event_last_synced_id" translatable="false">ns_offline_event_last_synced_id</string>
    <string name="key_ns_profile_store_last_synced_timestamp" translatable="false">ns_profile_store_last_synced_timestamp</string>