Tidepool: cleanup, send data older than 3h

Milos Kozak 2023-02-01 07:49:23 +01:00
parent 8c0eefac23
commit 50c1234630
8 changed files with 123 additions and 120 deletions

View file

@@ -8,6 +8,7 @@ import androidx.room.PrimaryKey
 import info.nightscout.database.entities.embedments.InterfaceIDs
 import info.nightscout.database.entities.interfaces.DBEntryWithTimeAndDuration
 import info.nightscout.database.entities.interfaces.TraceableDBEntry
+import info.nightscout.database.entities.interfaces.end
 import java.util.TimeZone

 @Entity(
@@ -53,9 +54,6 @@ data class OfflineEvent(
         previous.interfaceIDs.nightscoutId == null &&
             interfaceIDs.nightscoutId != null

-    fun isRecordDeleted(other: OfflineEvent): Boolean =
-        isValid && !other.isValid
-
     enum class Reason {
         DISCONNECT_PUMP,
         SUSPEND,

View file

@@ -8,7 +8,7 @@ import okhttp3.Response
 import okio.Buffer
 import java.io.IOException

-class InfoInterceptor(val tag: String = "interceptor", val aapsLogger: AAPSLogger) : Interceptor {
+class InfoInterceptor(val aapsLogger: AAPSLogger) : Interceptor {

     @Throws(IOException::class)
     override fun intercept(chain: Interceptor.Chain): Response {

View file

@@ -4,38 +4,37 @@ import info.nightscout.plugins.sync.tidepool.messages.AuthReplyMessage
 import info.nightscout.plugins.sync.tidepool.messages.DatasetReplyMessage
 import okhttp3.Headers

-class Session(val authHeader: String?,
-              private val sessionTokenHeader: String,
-              val service: TidepoolApiService?) {
+class Session(
+    val authHeader: String?,
+    private val sessionTokenHeader: String,
+    val service: TidepoolApiService?
+) {

     internal var token: String? = null
     internal var authReply: AuthReplyMessage? = null
     internal var datasetReply: DatasetReplyMessage? = null
     internal var start: Long = 0
     internal var end: Long = 0

     @Volatile
     internal var iterations: Int = 0

     fun populateHeaders(headers: Headers) {
         if (this.token == null) {
-            this.token = headers.get(sessionTokenHeader)
+            this.token = headers[sessionTokenHeader]
         }
     }

     fun populateBody(obj: Any?) {
-        if (obj == null) return
-        if (obj is AuthReplyMessage) {
-            authReply = obj
-        } else if (obj is List<*>) {
-            val list = obj as? List<*>?
-            list?.getOrNull(0)?.let {
-                if (it is DatasetReplyMessage) {
-                    datasetReply = it
-                }
-            }
-        } else if (obj is DatasetReplyMessage) {
-            datasetReply = obj
-        }
+        when (obj) {
+            is AuthReplyMessage    -> authReply = obj
+            is List<*>             ->
+                (obj as? List<*>?)?.getOrNull(0)?.let {
+                    if (it is DatasetReplyMessage) datasetReply = it
+                }
+            is DatasetReplyMessage -> datasetReply = obj
+        }
     }
 }

View file

@@ -8,7 +8,14 @@ import retrofit2.Call
 import retrofit2.Callback
 import retrofit2.Response

-internal class TidepoolCallback<T>(private val aapsLogger: AAPSLogger, private val rxBus: RxBus, private val session: Session, val name: String, val onSuccess: () -> Unit, val onFail: () -> Unit) :
+internal class TidepoolCallback<T>(
+    private val aapsLogger: AAPSLogger,
+    private val rxBus: RxBus,
+    private val session: Session,
+    private val name: String,
+    private val onSuccess: () -> Unit,
+    private val onFail: () -> Unit
+) :
     Callback<T> {

     override fun onResponse(call: Call<T>, response: Response<T>) {

View file

@@ -71,7 +71,7 @@ class TidepoolUploader @Inject constructor(
         val client = OkHttpClient.Builder()
             .addInterceptor(httpLoggingInterceptor)
-            .addInterceptor(InfoInterceptor(TidepoolUploader::class.java.name, aapsLogger))
+            .addInterceptor(InfoInterceptor(aapsLogger))
             .build()

         retrofit = Retrofit.Builder()
@@ -114,12 +114,15 @@
             rxBus.send(EventTidepoolStatus(("Connecting")))
             val call = session?.service?.getLogin(authHeader)
-            call?.enqueue(TidepoolCallback<AuthReplyMessage>(aapsLogger, rxBus, session!!, "Login", {
-                startSession(session!!, doUpload)
-            }, {
-                connectionStatus = ConnectionStatus.FAILED
-                releaseWakeLock()
-            }))
+            call?.enqueue(TidepoolCallback<AuthReplyMessage>(
+                aapsLogger, rxBus, session!!, "Login",
+                {
+                    startSession(session!!, doUpload)
+                }, {
+                    connectionStatus = ConnectionStatus.FAILED
+                    releaseWakeLock()
+                })
+            )
             return
         } else {
             aapsLogger.debug(LTag.TIDEPOOL, "Cannot do login as user credentials have not been set correctly")
@@ -135,15 +138,14 @@
         session.authHeader?.let {
             val call = session.service?.getLogin(it)
-            call?.enqueue(TidepoolCallback<AuthReplyMessage>(aapsLogger, rxBus, session, "Login", {
-                OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Successfully logged into Tidepool.")
-            }, {
-                OKDialog.show(
-                    rootContext,
-                    rh.gs(R.string.tidepool),
-                    "Failed to log into Tidepool.\nCheck that your user name and password are correct."
-                )
-            }))
+            call?.enqueue(TidepoolCallback<AuthReplyMessage>(
+                aapsLogger, rxBus, session, "Login",
+                {
+                    OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Successfully logged into Tidepool.")
+                }, {
+                    OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Failed to log into Tidepool.\nCheck that your user name and password are correct.")
+                })
+            )
         }
             ?: OKDialog.show(rootContext, rh.gs(R.string.tidepool), "Cannot do login as user credentials have not been set correctly")
@@ -158,39 +160,41 @@
                 session.authReply!!.userid!!, "AAPS", 1
             )
-            datasetCall.enqueue(TidepoolCallback<List<DatasetReplyMessage>>(aapsLogger, rxBus, session, "Get Open Datasets", {
-                if (session.datasetReply == null) {
-                    rxBus.send(EventTidepoolStatus(("Creating new dataset")))
-                    val call = session.service.openDataSet(
-                        session.token!!, session.authReply!!.userid!!,
-                        OpenDatasetRequestMessage(config, dateUtil).getBody()
-                    )
-                    call.enqueue(TidepoolCallback<DatasetReplyMessage>(aapsLogger, rxBus, session, "Open New Dataset", {
-                        connectionStatus = ConnectionStatus.CONNECTED
-                        rxBus.send(EventTidepoolStatus(("New dataset OK")))
-                        if (doUpload) doUpload()
-                        else
-                            releaseWakeLock()
-                    }, {
-                        rxBus.send(EventTidepoolStatus(("New dataset FAILED")))
-                        connectionStatus = ConnectionStatus.FAILED
-                        releaseWakeLock()
-                    }))
-                } else {
-                    aapsLogger.debug(LTag.TIDEPOOL, "Existing Dataset: " + session.datasetReply!!.getUploadId())
-                    // TODO: Wouldn't need to do this if we could block on the above `call.enqueue`.
-                    // ie, do the openDataSet conditionally, and then do `doUpload` either way.
-                    connectionStatus = ConnectionStatus.CONNECTED
-                    rxBus.send(EventTidepoolStatus(("Appending to existing dataset")))
-                    if (doUpload) doUpload()
-                    else
-                        releaseWakeLock()
-                }
-            }, {
-                connectionStatus = ConnectionStatus.FAILED
-                rxBus.send(EventTidepoolStatus(("Open dataset FAILED")))
-                releaseWakeLock()
-            }))
+            datasetCall.enqueue(
+                TidepoolCallback<List<DatasetReplyMessage>>(
+                    aapsLogger, rxBus, session, "Get Open Datasets",
+                    {
+                        if (session.datasetReply == null) {
+                            rxBus.send(EventTidepoolStatus(("Creating new dataset")))
+                            val call = session.service.openDataSet(session.token!!, session.authReply!!.userid!!, OpenDatasetRequestMessage(config, dateUtil).getBody())
+                            call.enqueue(TidepoolCallback<DatasetReplyMessage>(
+                                aapsLogger, rxBus, session, "Open New Dataset",
+                                {
+                                    connectionStatus = ConnectionStatus.CONNECTED
+                                    rxBus.send(EventTidepoolStatus(("New dataset OK")))
+                                    if (doUpload) doUpload()
+                                    else releaseWakeLock()
+                                }, {
+                                    rxBus.send(EventTidepoolStatus(("New dataset FAILED")))
+                                    connectionStatus = ConnectionStatus.FAILED
+                                    releaseWakeLock()
+                                })
+                            )
+                        } else {
+                            aapsLogger.debug(LTag.TIDEPOOL, "Existing Dataset: " + session.datasetReply!!.getUploadId())
+                            // TODO: Wouldn't need to do this if we could block on the above `call.enqueue`.
+                            // ie, do the openDataSet conditionally, and then do `doUpload` either way.
+                            connectionStatus = ConnectionStatus.CONNECTED
+                            rxBus.send(EventTidepoolStatus(("Appending to existing dataset")))
+                            if (doUpload) doUpload()
+                            else releaseWakeLock()
+                        }
+                    }, {
+                        connectionStatus = ConnectionStatus.FAILED
+                        rxBus.send(EventTidepoolStatus(("Open dataset FAILED")))
+                        releaseWakeLock()
+                    })
+            )
         } else {
             aapsLogger.error("Got login response but cannot determine userId - cannot proceed")
             connectionStatus = ConnectionStatus.FAILED
@@ -234,16 +238,20 @@ class TidepoolUploader @Inject constructor(
         rxBus.send(EventTidepoolStatus(("Uploading")))
         if (session.service != null && session.token != null && session.datasetReply != null) {
             val call = session.service.doUpload(session.token!!, session.datasetReply!!.getUploadId()!!, body)
-            call.enqueue(TidepoolCallback<UploadReplyMessage>(aapsLogger, rxBus, session, "Data Upload", {
-                uploadChunk.setLastEnd(session.end)
-                rxBus.send(EventTidepoolStatus(("Upload completed OK")))
-                releaseWakeLock()
-                uploadNext()
-            }, {
-                connectionStatus = ConnectionStatus.DISCONNECTED
-                rxBus.send(EventTidepoolStatus(("Upload FAILED")))
-                releaseWakeLock()
-            }))
+            call.enqueue(TidepoolCallback<UploadReplyMessage>(
+                aapsLogger, rxBus, session, "Data Upload",
+                {
+                    uploadChunk.setLastEnd(session.end)
+                    connectionStatus = ConnectionStatus.CONNECTED
+                    rxBus.send(EventTidepoolStatus(("Upload completed OK")))
+                    releaseWakeLock()
+                    uploadNext()
+                }, {
+                    connectionStatus = ConnectionStatus.DISCONNECTED
+                    rxBus.send(EventTidepoolStatus(("Upload FAILED")))
+                    releaseWakeLock()
+                })
+            )
         }
     }
 }
@@ -256,7 +264,7 @@ class TidepoolUploader @Inject constructor(
             aapsLogger.debug(LTag.TIDEPOOL, "Blocked by connectivity settings")
             return
         }
-        if (uploadChunk.getLastEnd() < dateUtil.now() - T.mins(1).msecs()) {
+        if (uploadChunk.getLastEnd() < dateUtil.now() - T.hours(3).msecs() - T.mins(1).msecs()) {
             SystemClock.sleep(3000)
             aapsLogger.debug(LTag.TIDEPOOL, "Restarting doUpload. Last: " + dateUtil.dateAndTimeString(uploadChunk.getLastEnd()))
             doUpload()
@@ -267,15 +275,18 @@ class TidepoolUploader @Inject constructor(
         if (session?.datasetReply?.id != null) {
             extendWakeLock(60000)
             val call = session!!.service?.deleteDataSet(session!!.token!!, session!!.datasetReply!!.id!!)
-            call?.enqueue(TidepoolCallback(aapsLogger, rxBus, session!!, "Delete Dataset", {
-                connectionStatus = ConnectionStatus.DISCONNECTED
-                rxBus.send(EventTidepoolStatus(("Dataset removed OK")))
-                releaseWakeLock()
-            }, {
-                connectionStatus = ConnectionStatus.DISCONNECTED
-                rxBus.send(EventTidepoolStatus(("Dataset remove FAILED")))
-                releaseWakeLock()
-            }))
+            call?.enqueue(TidepoolCallback(
+                aapsLogger, rxBus, session!!, "Delete Dataset",
+                {
+                    connectionStatus = ConnectionStatus.DISCONNECTED
+                    rxBus.send(EventTidepoolStatus(("Dataset removed OK")))
+                    releaseWakeLock()
+                }, {
+                    connectionStatus = ConnectionStatus.DISCONNECTED
+                    rxBus.send(EventTidepoolStatus(("Dataset remove FAILED")))
+                    releaseWakeLock()
+                })
+            )
         } else {
             aapsLogger.error("Got login response but cannot determine datasetId - cannot proceed")
         }
@@ -292,15 +303,19 @@ class TidepoolUploader @Inject constructor(
             requireNotNull(userId)
             extendWakeLock(60000)
             val call = session.service?.deleteAllData(token, userId)
-            call?.enqueue(TidepoolCallback(aapsLogger, rxBus, session, "Delete all data", {
-                connectionStatus = ConnectionStatus.DISCONNECTED
-                rxBus.send(EventTidepoolStatus(("All data removed OK")))
-                releaseWakeLock()
-            }, {
-                connectionStatus = ConnectionStatus.DISCONNECTED
-                rxBus.send(EventTidepoolStatus(("All data remove FAILED")))
-                releaseWakeLock()
-            }))
+            call?.enqueue(
+                TidepoolCallback(
+                    aapsLogger, rxBus, session, "Delete all data",
+                    {
+                        connectionStatus = ConnectionStatus.DISCONNECTED
+                        rxBus.send(EventTidepoolStatus(("All data removed OK")))
+                        releaseWakeLock()
+                    }, {
+                        connectionStatus = ConnectionStatus.DISCONNECTED
+                        rxBus.send(EventTidepoolStatus(("All data remove FAILED")))
+                        releaseWakeLock()
+                    })
+            )
         } catch (e: IllegalArgumentException) {
             aapsLogger.error("Got login response but cannot determine userId - cannot proceed")
         }
@@ -321,13 +336,12 @@ class TidepoolUploader @Inject constructor(
     @Synchronized
     private fun releaseWakeLock() {
         wl?.let {
-            if (it.isHeld) {
+            if (it.isHeld)
                 try {
                     it.release()
                 } catch (e: Exception) {
                     aapsLogger.error("Error releasing wakelock: $e")
                 }
-            }
         }
     }
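
Note (not part of the commit): the restart check in the hunk at +264 above now subtracts the same 3 h offset, so another upload pass is scheduled only while the last uploaded end is still more than three hours and one minute behind the current time. A minimal stand-alone sketch of that condition, with java.util.concurrent.TimeUnit standing in for AAPS's T/DateUtil helpers:

import java.util.concurrent.TimeUnit

// Illustration only: mirrors the condition
//   uploadChunk.getLastEnd() < dateUtil.now() - T.hours(3).msecs() - T.mins(1).msecs()
// i.e. keep restarting the upload only while data older than the 3 h cutoff remains unsent.
fun shouldRestartUpload(lastEnd: Long, now: Long): Boolean =
    lastEnd < now - TimeUnit.HOURS.toMillis(3) - TimeUnit.MINUTES.toMillis(1)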

View file

@@ -41,21 +41,21 @@ class UploadChunk @Inject constructor(
     private val maxUploadSize = T.days(7).msecs() // don't change this

     fun getNext(session: Session?): String? {
-        if (session == null)
-            return null
+        session ?: return null

         session.start = getLastEnd()
-        session.end = min(session.start + maxUploadSize, dateUtil.now())
+        // do not upload last 3h, TBR can be still running
+        session.end = min(session.start + maxUploadSize, dateUtil.now() - T.hours(3).msecs())

         val result = get(session.start, session.end)
         if (result.length < 3) {
             aapsLogger.debug(LTag.TIDEPOOL, "No records in this time period, setting start to best end time")
-            setLastEnd(max(session.end, getOldestRecordTimeStamp()))
+            setLastEnd(session.end)
         }
         return result
     }

-    operator fun get(start: Long, end: Long): String {
+    fun get(start: Long, end: Long): String {
         aapsLogger.debug(LTag.TIDEPOOL, "Syncing data between: " + dateUtil.dateAndTimeString(start) + " -> " + dateUtil.dateAndTimeString(end))
         if (end <= start) {
@@ -94,21 +94,6 @@ class UploadChunk @Inject constructor(
         }
     }

-    // numeric limits must match max time windows
-    private fun getOldestRecordTimeStamp(): Long {
-        // TODO we could make sure we include records older than the first bg record for completeness
-        val start: Long = 0
-        val end = dateUtil.now()
-        val bgReadingList = repository.compatGetBgReadingsDataFromTime(start, end, true)
-            .blockingGet()
-        return if (bgReadingList.isNotEmpty())
-            bgReadingList[0].timestamp
-        else -1
-    }
-
     private fun getTreatments(start: Long, end: Long): List<BaseElement> {
         val result = LinkedList<BaseElement>()
         repository.getBolusesDataFromTimeToTime(start, end, true)
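
For orientation only (not part of the diff): after this change getNext() uploads the window [getLastEnd(), min(getLastEnd() + 7 days, now - 3 h)], so a temporary basal that may still be running is never sent. A minimal sketch of that window with plain millisecond timestamps, using java.util.concurrent.TimeUnit in place of AAPS's T/DateUtil helpers:

import java.util.concurrent.TimeUnit

// Illustration only: the window UploadChunk.getNext() computes after this commit.
fun uploadWindow(lastEnd: Long, now: Long): Pair<Long, Long> {
    val maxUploadSize = TimeUnit.DAYS.toMillis(7)   // matches maxUploadSize above
    val holdBack = TimeUnit.HOURS.toMillis(3)       // "send data older than 3h"
    val start = lastEnd
    val end = minOf(start + maxUploadSize, now - holdBack)
    return start to end                             // end <= start means nothing to upload yet (see the guard in get())
}

fun main() {
    val now = System.currentTimeMillis()
    val (start, end) = uploadWindow(now - TimeUnit.DAYS.toMillis(1), now)
    println("upload window: $start .. $end")        // ends 3 h before "now"
}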

View file

@@ -7,7 +7,7 @@ import info.nightscout.interfaces.profile.Profile
 import info.nightscout.shared.utils.DateUtil
 import java.util.UUID

-class BasalElement(tbr: TemporaryBasal, private val profile: Profile, dateUtil: DateUtil)
+class BasalElement(tbr: TemporaryBasal, profile: Profile, dateUtil: DateUtil)
     : BaseElement(tbr.timestamp, UUID.nameUUIDFromBytes(("AAPS-basal" + tbr.timestamp).toByteArray()).toString(), dateUtil) {

     internal var timestamp: Long = 0 // not exposed

View file

@@ -11,7 +11,7 @@
     <string name="key_ns_device_status_last_synced_id" translatable="false">ns_device_status_last_synced_id</string>
     <string name="key_ns_temporary_basal_last_synced_id" translatable="false">ns_temporary_basal_last_synced_id</string>
     <string name="key_ns_extended_bolus_last_synced_id" translatable="false">ns_extended_bolus_last_synced_id</string>
-    <string name="key_ns_profile_switch_last_synced_id" translatable="false">profile_switch_last_synced_id</string>
+    <string name="key_ns_profile_switch_last_synced_id" translatable="false">ns_profile_switch_last_synced_id</string>
     <string name="key_ns_effective_profile_switch_last_synced_id" translatable="false">ns_effective_profile_switch_last_synced_id</string>
     <string name="key_ns_offline_event_last_synced_id" translatable="false">ns_offline_event_last_synced_id</string>
     <string name="key_ns_profile_store_last_synced_timestamp" translatable="false">ns_profile_store_last_synced_timestamp</string>