diff --git a/alfred b/alfred deleted file mode 160000 index fb3c590..0000000 --- a/alfred +++ /dev/null @@ -1 +0,0 @@ -Subproject commit fb3c590a0a09fcd85f95dd5512e5d7096efed51e diff --git a/alfred-ingester b/alfred-ingester deleted file mode 160000 index 336177e..0000000 --- a/alfred-ingester +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 336177ef3ac9cda62c8ca6acadc97c0c3ddcef28 diff --git a/alfred-ingester/.github/workflows/semgrep.yml b/alfred-ingester/.github/workflows/semgrep.yml new file mode 100644 index 0000000..d354790 --- /dev/null +++ b/alfred-ingester/.github/workflows/semgrep.yml @@ -0,0 +1,45 @@ +name: Semgrep + +on: + # Scan changed files in PRs, block on new issues only (existing issues ignored) + pull_request: + branches: + - master + - main + - develop + - portal + + # Schedule this job to run at a certain time, using cron syntax + # Note that * is a special character in YAML so you have to quote this string + schedule: + - cron: '00 03 * * 0' # 03:00 UTC (8:30 AM IST) every Sunday + +jobs: + central-semgrep: + name: Static code Analysis + uses: navi-infosec/central-semgrep-action/.github/workflows/central-semgrep.yml@using-token + with: + github-event-number: ${{github.event.number}} + github-event-name: ${{github.event_name}} + github-repository: ${{github.repository}} + github-pr_owner_name: ${{github.event.pull_request.user.login}} + secrets: + READ_SEMGREP_RULES_TOKEN: ${{secrets.READ_SEMGREP_RULES_TOKEN}} + EMAIL_FETCH_TOKEN: ${{secrets.EMAIL_FETCH_TOKEN}} + + run-if-failed: + runs-on: [ self-hosted, Linux ] + needs: [central-semgrep] + if: always() && (needs.central-semgrep.result == 'failure') + steps: + - name: Create comment + if: ${{ ( github.event.number != '' ) }} + uses: navi-synced-actions/create-or-update-comment@v2 + with: + issue-number: ${{ github.event.pull_request.number }} + body: | + **Vulnerabilities have been discovered in this PR. 
Please check the vulnerability Analysis section of Semgrep Workflow to understand the security vulnerability. Feel free to reach out to #sast-help for more information ** + + - name: Assign Reviewers + if: ${{ ( github.event.number != '' ) }} + uses: navi-infosec/security-oncall-action@v1.1 \ No newline at end of file diff --git a/alfred-ingester/.gitignore b/alfred-ingester/.gitignore new file mode 100644 index 0000000..549e00a --- /dev/null +++ b/alfred-ingester/.gitignore @@ -0,0 +1,33 @@ +HELP.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/alfred-ingester/.mvn/wrapper/maven-wrapper.properties b/alfred-ingester/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000..8f96f52 --- /dev/null +++ b/alfred-ingester/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.7/apache-maven-3.9.7-bin.zip diff --git a/alfred-ingester/mvnw b/alfred-ingester/mvnw new file mode 100755 index 0000000..d7c358e --- /dev/null +++ b/alfred-ingester/mvnw @@ -0,0 +1,259 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Optional ENV vars +# ----------------- +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output +# ---------------------------------------------------------------------------- + +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x + +# OS specific support. 
+native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac + +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" + + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2 + return 1 + fi + fi + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi + fi +} + +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" + done + printf %x\\n $h +} + +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } + +die() { + printf %s\\n "$1" >&2 + exit 1 +} + +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. 
+ printf "%s" "${1}" | tr -d '[:space:]' +} + +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" +distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" 
"$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} + +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" +fi + +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac + +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" +fi + +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +# select .zip or .tar.gz +if ! command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" +fi + +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v + +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac + +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... 
using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 + fi +fi + +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" +else + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" +fi +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" + +clean || : +exec_maven "$@" diff --git a/alfred-ingester/mvnw.cmd b/alfred-ingester/mvnw.cmd new file mode 100644 index 0000000..6f779cf --- /dev/null +++ b/alfred-ingester/mvnw.cmd @@ -0,0 +1,149 @@ +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. 
See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven 
from wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! 
$distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
+ } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/alfred-ingester/pom.xml b/alfred-ingester/pom.xml new file mode 100644 index 0000000..cab9e58 --- /dev/null +++ b/alfred-ingester/pom.xml @@ -0,0 +1,138 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.2.4 + + + com.navi + alfred-ingester + 0.0.1-SNAPSHOT + alfred-ingester + Alfred-ingester in Java + + 2.19.0 + 75% + https://nexus.cmd.navi-tech.in + 2023.0.0 + 21 + 1.9.24 + + + + org.springframework.boot + spring-boot-starter-web + + + com.fasterxml.jackson.module + jackson-module-kotlin + + + org.jetbrains.kotlin + kotlin-reflect + + + org.jetbrains.kotlin + kotlin-stdlib + + + + org.springframework.boot + spring-boot-starter-test + test + + + org.jetbrains.kotlin + kotlin-test-junit5 + test + + + + com.bucket4j + bucket4j-core + 8.10.1 + + + org.springframework.boot + spring-boot-starter-actuator + + + org.springframework.boot + spring-boot-starter-logging + + + + 
+ co.elastic.clients + elasticsearch-java + 8.11.1 + + + com.navi.sa.clients + cache + 0.3-SNAPSHOT + + + + + + ${project.basedir}/src/main/kotlin + ${project.basedir}/src/test/kotlin + + + org.springframework.boot + spring-boot-maven-plugin + + + org.jetbrains.kotlin + kotlin-maven-plugin + + + -Xjsr305=strict + + + spring + + + + + org.jetbrains.kotlin + kotlin-maven-allopen + ${kotlin.version} + + + + + + + + nexus + Snapshot + ${nexus.host}/repository/maven-snapshots + + + Release + ${nexus.host}/repository/maven-releases + + + MavenCentral + https://repo.maven.apache.org/maven2/ + + + + + + + org.springframework.cloud + spring-cloud-dependencies + ${spring-cloud.version} + pom + import + + + + + diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/AlfredIngesterApplication.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/AlfredIngesterApplication.kt new file mode 100644 index 0000000..bceceeb --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/AlfredIngesterApplication.kt @@ -0,0 +1,11 @@ +package com.navi.ingester + +import org.springframework.boot.autoconfigure.SpringBootApplication +import org.springframework.boot.runApplication + +@SpringBootApplication +class AlfredIngesterApplication + +fun main(args: Array) { + runApplication(*args) +} diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/common/Constants.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/common/Constants.kt new file mode 100644 index 0000000..75336ef --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/common/Constants.kt @@ -0,0 +1,8 @@ +package com.navi.ingester.common + +const val APP_VERSION_NAME = "appVersionName" +const val OS_VERSION = "osVersion" +const val DEVICE_ID = "deviceId" +const val APP_VERSION_CODE = "appVersionCode" +const val APP_OS = "appOs" +const val ANDROID_OS = "Android" \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/CruiseController.kt 
b/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/CruiseController.kt new file mode 100644 index 0000000..9ffc059 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/CruiseController.kt @@ -0,0 +1,21 @@ +package com.navi.ingester.controller + +import com.navi.ingester.common.* +import org.springframework.web.bind.annotation.GetMapping +import org.springframework.web.bind.annotation.RequestHeader +import org.springframework.web.bind.annotation.RestController + +@RestController +class CruiseController { + + @GetMapping("/cruise") + fun getCruiseControlConfig( + @RequestHeader(APP_VERSION_NAME) appVersionName: String, + @RequestHeader(OS_VERSION) osVersion: String, + @RequestHeader(DEVICE_ID) deviceId: String, + @RequestHeader(APP_VERSION_CODE) appVersionCode: String?, + @RequestHeader(APP_OS, defaultValue = ANDROID_OS) appOs: String + ) { + + } +} \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/PingController.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/PingController.kt new file mode 100644 index 0000000..465d323 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/controller/PingController.kt @@ -0,0 +1,13 @@ +package com.navi.ingester.controller + +import org.springframework.http.ResponseEntity +import org.springframework.web.bind.annotation.GetMapping +import org.springframework.web.bind.annotation.RestController + +@RestController +class PingController { + @GetMapping("/ping") + fun ping(): ResponseEntity> { + return ResponseEntity.ok(mapOf("success" to true)) + } +} \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/AppEvent.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/AppEvent.kt new file mode 100644 index 0000000..d68ec49 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/AppEvent.kt @@ -0,0 +1,29 @@ +package com.navi.ingester.dtos + +import 
com.fasterxml.jackson.annotation.JsonIgnoreProperties +import com.fasterxml.jackson.databind.PropertyNamingStrategies +import com.fasterxml.jackson.databind.annotation.JsonNaming +import com.navi.ingester.enums.EventType + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class AppEvent( + val baseAttributes: BaseAttributes, + val events: List +) + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class EventAttributes( + val eventId: String? = null, + val parentSessionId: String? = null, + val sessionId: String? = null, + val screenName: String? = null, + val screenshotTime: Long? = null, + val moduleName: String? = null, + val eventName: String? = null, + val eventTimestamp: Long? = null, + val attributes: Map? = null, + val eventType: EventType? = null, + val zipName: String? = null +) \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/BaseAttributes.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/BaseAttributes.kt new file mode 100644 index 0000000..f2a8895 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/BaseAttributes.kt @@ -0,0 +1,36 @@ +package com.navi.ingester.dtos + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties +import com.fasterxml.jackson.databind.PropertyNamingStrategies +import com.fasterxml.jackson.databind.annotation.JsonNaming + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class BaseAttributes( + val appVersionCode: String? = null, + val appVersionName: String? = null, + val clientTs: Long? = null, + val deviceId: String? = null, + val deviceModel: String? = null, + val deviceManufacturer: String? = null, + val screenResolution: String? = null, + val appOS: String? = null, + val osVersion: String? = null, + val latitude: Float? 
= null, + val longitude: Float? = null, + val networkType: String? = null, + val agentId: String? = null, + val upTime: Long? = null, + val carrierName: String? = null, + val metadata: Map? = null, + val sessionTimeStamp: Long? = null, + val eventTimestamp: Long? = null, + val sessionId: String? = null, + val parentSessionId: String? = null, + val traceId: String? = null, + val eventEndTimeStamp: Long? = null, + val phoneNumber: String? = null, + val hasErrors: Boolean? = null, + val snapshotPerSecond: Long? = null, + val imageType: String? = null +) diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAndNetworkAttributes.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAndNetworkAttributes.kt new file mode 100644 index 0000000..baed031 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAndNetworkAttributes.kt @@ -0,0 +1,14 @@ +package com.navi.ingester.dtos + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties +import com.fasterxml.jackson.databind.PropertyNamingStrategies +import com.fasterxml.jackson.databind.annotation.JsonNaming + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class DeviceAndNetworkAttributes( + val deviceId: String, + val networkType: String, + val networkStrength: Double, + val deviceAttributes: DeviceAttributes +) diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAttributes.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAttributes.kt new file mode 100644 index 0000000..7a1cb3b --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/DeviceAttributes.kt @@ -0,0 +1,14 @@ +package com.navi.ingester.dtos + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties +import com.fasterxml.jackson.databind.PropertyNamingStrategies +import com.fasterxml.jackson.databind.annotation.JsonNaming + +@JsonIgnoreProperties(ignoreUnknown = true) 
+@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class DeviceAttributes( + val battery: Double, + val cpu: Double? = null, + val storage: Double? = null, + val memory: Double? = null +) diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/SessionUploadRequest.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/SessionUploadRequest.kt new file mode 100644 index 0000000..a1bf5ce --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/dtos/SessionUploadRequest.kt @@ -0,0 +1,20 @@ +package com.navi.ingester.dtos + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties +import com.fasterxml.jackson.databind.PropertyNamingStrategies +import com.fasterxml.jackson.databind.annotation.JsonNaming + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class SessionUploadRequest( + val baseAttributes: BaseAttributes, + val sessionUploadEventAttributes: SessionUploadEventAttributes +) + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class) +data class SessionUploadEventAttributes( + val beginningDeviceAttributes: DeviceAttributes, + val endDeviceAttributes: DeviceAttributes, + val eventId: String +) \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/enums/EventType.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/enums/EventType.kt new file mode 100644 index 0000000..09e2545 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/enums/EventType.kt @@ -0,0 +1,15 @@ +package com.navi.ingester.enums + +enum class EventType { + TOUCH_EVENT, + SCROLL_EVENT, + INFO_LOG, + WARN_LOG, + ERROR_LOG, + SESSION_UPLOAD_EVENT, + CRASH_ANALYTICS_EVENT, + ANR_EVENT, + START_RECORDING_EVENT, + STOP_RECORDING_EVENT, + SCREEN_TRANSITION_EVENT +} \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/handler/AppHandler.kt 
b/alfred-ingester/src/main/kotlin/com/navi/ingester/handler/AppHandler.kt new file mode 100644 index 0000000..5c869ab --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/handler/AppHandler.kt @@ -0,0 +1,5 @@ +package com.navi.ingester.handler + +interface AppHandler { + +} \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/InterceptorConfig.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/InterceptorConfig.kt new file mode 100644 index 0000000..b7661d0 --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/InterceptorConfig.kt @@ -0,0 +1,15 @@ +package com.navi.ingester.interceptor + +import org.springframework.context.annotation.Configuration +import org.springframework.web.servlet.config.annotation.InterceptorRegistry +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer + +@Configuration +class InterceptorConfig( + val rateLimitInterceptor: RateLimitInterceptor +): WebMvcConfigurer { + + override fun addInterceptors(registry: InterceptorRegistry) { + registry.addInterceptor(rateLimitInterceptor) + } +} \ No newline at end of file diff --git a/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/RateLimitInterceptor.kt b/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/RateLimitInterceptor.kt new file mode 100644 index 0000000..d9ae15d --- /dev/null +++ b/alfred-ingester/src/main/kotlin/com/navi/ingester/interceptor/RateLimitInterceptor.kt @@ -0,0 +1,74 @@ +package com.navi.ingester.interceptor + +import io.github.bucket4j.Bandwidth +import io.github.bucket4j.Bucket +import jakarta.servlet.http.HttpServletRequest +import jakarta.servlet.http.HttpServletResponse +import org.springframework.beans.factory.annotation.Value +import org.springframework.http.HttpStatus +import org.springframework.stereotype.Component +import org.springframework.web.servlet.HandlerInterceptor +import java.time.Duration +import 
java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.TimeUnit + +@Component +class RateLimitInterceptor( + private val buckets: ConcurrentHashMap = ConcurrentHashMap(), + @Value("\${rate.limit.capacity}") val rateLimitCapacity: Long, + @Value("\${rate.limit.tokens}") val rateLimitTokens: Long, + @Value("\${rate.limit.refill.time.in.secs}") val rateLimitRefillTimeInSecs: Long, + private val tokensToConsumeByDefault: Int = 1 +) : HandlerInterceptor { + + override fun preHandle( + request: HttpServletRequest, + response: HttpServletResponse, + handler: Any + ): Boolean { + val clientIP = getClientIP(request) + + val bucketByClientIP: Bucket? + if (buckets.containsKey(clientIP)) { + bucketByClientIP = buckets[clientIP] + } else { + bucketByClientIP = getDefaultBucket() + buckets[clientIP] = bucketByClientIP + } + + val probe = bucketByClientIP!!.tryConsumeAndReturnRemaining(tokensToConsumeByDefault.toLong()) + if (probe.isConsumed) { + response.addHeader( + "X-Rate-Limit-Remaining", + probe.remainingTokens.toString() + ) + return true + } + + response.status = HttpStatus.TOO_MANY_REQUESTS.value() // 429 + response.addHeader( + "X-Rate-Limit-Retry-After-Milliseconds", + TimeUnit.NANOSECONDS.toMillis(probe.nanosToWaitForRefill).toString() + ) + + return false + } + + private fun getClientIP(request: HttpServletRequest): String { + var ip = request.getHeader("X-FORWARDED-FOR") + if (ip.isNullOrEmpty()) { + ip = request.remoteAddr + } + + return ip + } + + private fun getDefaultBucket(): Bucket { + return Bucket.builder() + .addLimit(Bandwidth.builder() + .capacity(rateLimitCapacity) + .refillIntervally(rateLimitTokens, Duration.ofSeconds(rateLimitRefillTimeInSecs)) + .build()) + .build() + } +} \ No newline at end of file diff --git a/alfred-ingester/src/main/resources/application-docker.properties b/alfred-ingester/src/main/resources/application-docker.properties new file mode 100644 index 0000000..60e2afb --- /dev/null +++ 
b/alfred-ingester/src/main/resources/application-docker.properties @@ -0,0 +1,12 @@ +# +# Copyright @ 2022 by Navi Technologies Private Limited +# All rights reserved. Strictly confidential. +# +# Application +spring.config.activate.on-profile=docker +spring.jackson.mapper.default_view_inclusion=true + +# Rate Limiting +rate.limit.capacity=${RATE_LIMIT_CAPACITY} +rate.limit.tokens=${RATE_LIMIT_TOKENS} +rate.limit.refill.time.in.secs=${RATE_LIMIT_REFILL_TIME_IN_SECS} \ No newline at end of file diff --git a/alfred-ingester/src/main/resources/application-local.properties b/alfred-ingester/src/main/resources/application-local.properties new file mode 100644 index 0000000..d8198de --- /dev/null +++ b/alfred-ingester/src/main/resources/application-local.properties @@ -0,0 +1,12 @@ +# +# Copyright @ 2022 by Navi Technologies Private Limited +# All rights reserved. Strictly confidential. +# +# Application +spring.config.activate.on-profile=local +spring.jackson.mapper.default_view_inclusion=true + +# Rate Limiting +rate.limit.capacity=2 +rate.limit.tokens=2 +rate.limit.refill.time.in.secs=60 \ No newline at end of file diff --git a/alfred-ingester/src/main/resources/application.properties b/alfred-ingester/src/main/resources/application.properties new file mode 100644 index 0000000..cee1342 --- /dev/null +++ b/alfred-ingester/src/main/resources/application.properties @@ -0,0 +1,10 @@ +spring.application.name=alfred-ingester +server.port=${PORT:8084} +spring.threads.virtual.enabled=true + +# Metrics related configuration +management.endpoint.metrics.enabled=true +management.endpoints.web.exposure.include=* +management.endpoint.prometheus.enabled=true +management.prometheus.metrics.export.enabled=true +management.server.port=4001 \ No newline at end of file diff --git a/alfred-ingester/src/test/kotlin/com/navi/ingester/AlfredIngesterApplicationTests.kt b/alfred-ingester/src/test/kotlin/com/navi/ingester/AlfredIngesterApplicationTests.kt new file mode 100644 index 
0000000..3bd4580 --- /dev/null +++ b/alfred-ingester/src/test/kotlin/com/navi/ingester/AlfredIngesterApplicationTests.kt @@ -0,0 +1,13 @@ +package com.navi.ingester + +import org.junit.jupiter.api.Test +import org.springframework.boot.test.context.SpringBootTest + +@SpringBootTest +class AlfredIngesterApplicationTests { + + @Test + fun contextLoads() { + } + +} diff --git a/alfred-web-session-recorder b/alfred-web-session-recorder deleted file mode 160000 index 5505791..0000000 --- a/alfred-web-session-recorder +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 55057919b473ddf0b5316ab79726bec05821e394 diff --git a/alfred-web-session-recorder/.github/CODEOWNERS b/alfred-web-session-recorder/.github/CODEOWNERS new file mode 100644 index 0000000..a01ffa3 --- /dev/null +++ b/alfred-web-session-recorder/.github/CODEOWNERS @@ -0,0 +1 @@ +* @apoorva-gupta_navi @lokesh-dugar_navi @lalit-garghate_navi \ No newline at end of file diff --git a/alfred-web-session-recorder/.github/workflows/publish-package.yml b/alfred-web-session-recorder/.github/workflows/publish-package.yml new file mode 100644 index 0000000..fb0f0a8 --- /dev/null +++ b/alfred-web-session-recorder/.github/workflows/publish-package.yml @@ -0,0 +1,39 @@ +name: Publish Package +on: + workflow_dispatch: + +jobs: + publish_package: + runs-on: [default] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-node@v3 + with: + node-version: '16.x' + registry-url: 'https://nexus.cmd.navi-tech.in/repository/navi-commons' + cache: 'npm' + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} + - name: Setup tsc + run: npm install -g typescript + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} + - name: Setup yarn + run: npm install -g yarn + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} + - name: yarn install + run: yarn install + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} + - uses: actions/setup-node@v3 + with: + node-version: '16.x' + 
registry-url: 'https://nexus.cmd.navi-tech.in/repository/npm-packages/' + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} + - name: Publishing Package + run: | + npm publish + env: + NODE_AUTH_TOKEN: ${{ secrets.NAVI_COMMONS_NPM_AUTH_TOKEN }} \ No newline at end of file diff --git a/alfred-web-session-recorder/.github/workflows/semgrep.yml b/alfred-web-session-recorder/.github/workflows/semgrep.yml new file mode 100644 index 0000000..d354790 --- /dev/null +++ b/alfred-web-session-recorder/.github/workflows/semgrep.yml @@ -0,0 +1,45 @@ +name: Semgrep + +on: + # Scan changed files in PRs, block on new issues only (existing issues ignored) + pull_request: + branches: + - master + - main + - develop + - portal + + # Schedule this job to run at a certain time, using cron syntax + # Note that * is a special character in YAML so you have to quote this string + schedule: + - cron: '00 03 * * 0' # scheduled for 8.30 AM on every sunday + +jobs: + central-semgrep: + name: Static code Analysis + uses: navi-infosec/central-semgrep-action/.github/workflows/central-semgrep.yml@using-token + with: + github-event-number: ${{github.event.number}} + github-event-name: ${{github.event_name}} + github-repository: ${{github.repository}} + github-pr_owner_name: ${{github.event.pull_request.user.login}} + secrets: + READ_SEMGREP_RULES_TOKEN: ${{secrets.READ_SEMGREP_RULES_TOKEN}} + EMAIL_FETCH_TOKEN: ${{secrets.EMAIL_FETCH_TOKEN}} + + run-if-failed: + runs-on: [ self-hosted, Linux ] + needs: [central-semgrep] + if: always() && (needs.central-semgrep.result == 'failure') + steps: + - name: Create comment + if: ${{ ( github.event.number != '' ) }} + uses: navi-synced-actions/create-or-update-comment@v2 + with: + issue-number: ${{ github.event.pull_request.number }} + body: | + **Vulnerabilities have been discovered in this PR. Please check the vulnerability Analysis section of Semgrep Workflow to understand the security vulnerability. 
Feel free to reach out to #sast-help for more information ** + + - name: Assign Reviewers + if: ${{ ( github.event.number != '' ) }} + uses: navi-infosec/security-oncall-action@v1.1 \ No newline at end of file diff --git a/alfred-web-session-recorder/.gitignore b/alfred-web-session-recorder/.gitignore new file mode 100644 index 0000000..88332fc --- /dev/null +++ b/alfred-web-session-recorder/.gitignore @@ -0,0 +1,109 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +.storage/ +jspm_packages/ + +# TypeScript v1 declaration files +typings/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test + +# parcel-bundler cache (https://parceljs.org/) +.cache + +# Next.js build output +.next + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and *not* Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output 
+.vuepress/dist + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +.idea + +.DS_Store \ No newline at end of file diff --git a/alfred-web-session-recorder/.npmrc b/alfred-web-session-recorder/.npmrc new file mode 100644 index 0000000..990e7b5 --- /dev/null +++ b/alfred-web-session-recorder/.npmrc @@ -0,0 +1,2 @@ +@navi:registry=https://nexus.cmd.navi-tech.in/repository/npm-packages/ +//https://nexus.cmd.navi-tech.in/repository/npm-packages/:__authToken=NpmToken.1a3d3462-fb82-364c-bc64-0051e24635b3 diff --git a/alfred-web-session-recorder/.prettierignore b/alfred-web-session-recorder/.prettierignore new file mode 100644 index 0000000..53c37a1 --- /dev/null +++ b/alfred-web-session-recorder/.prettierignore @@ -0,0 +1 @@ +dist \ No newline at end of file diff --git a/alfred-web-session-recorder/.prettierrc b/alfred-web-session-recorder/.prettierrc new file mode 100644 index 0000000..9d94fbc --- /dev/null +++ b/alfred-web-session-recorder/.prettierrc @@ -0,0 +1,8 @@ +{ + "singleQuote": false, + "jsxSingleQuote": false, + "tabWidth": 2, + "printWidth": 100, + "trailingComma": "all", + "semi": true +} diff --git a/alfred-web-session-recorder/CODEOWNERS b/alfred-web-session-recorder/CODEOWNERS new file mode 100644 index 0000000..ed58d2c --- /dev/null +++ b/alfred-web-session-recorder/CODEOWNERS @@ -0,0 +1 @@ +* @lokesh-dugar_navi \ No newline at end of file diff --git a/alfred-web-session-recorder/README.md b/alfred-web-session-recorder/README.md new file mode 100644 index 0000000..888afec --- /dev/null +++ b/alfred-web-session-recorder/README.md @@ -0,0 +1,45 @@ +

Welcome to alfred-session-recorder 👋

+ + +> A package that records web sessions, events, and user agents and sends the serialized encrypted data to a specified URL. + + +## 🚀 Usage + +Make sure you have node version > 16 + +Add tokens in .npmrc + +```sh +//https://nexus.cmd.navi-tech.in/repository/npm-packages/:__authToken=NpmToken.1a3d3462-fb82-364c-bc64-0051e24635b3 +``` +Add alfred-session-recorder + +```sh +yarn add @navi/alfred-session-recorder +``` + +Use below code snippet below to start sending recorded data to the backend. + +```sh + const recorder = new SnapshotRecorder({ + apiUrl: 'https://qa-alfred-ingester.np.navi-sa.in', // This will change for prod + projectName: 'your-project-id', // Put Project name + deviceId?: 'custom-device-id' + clientkey : + }); + const cleanup = recorder.startSnapshotRecording(); +``` + +``` + recorder.stopRecording(). // Use it to stop recording make sure to use cleanup accordingly +``` + +Note: apiUrl for Prod will be different, please reach out to the team for the prod link + +## Watch Your Videos here + +``` +https://qa-alfred-ui.np.navi-sa.in/ +``` +--- diff --git a/alfred-web-session-recorder/config.d.ts b/alfred-web-session-recorder/config.d.ts new file mode 100644 index 0000000..a001820 --- /dev/null +++ b/alfred-web-session-recorder/config.d.ts @@ -0,0 +1 @@ +declare module 'rrweb'; diff --git a/alfred-web-session-recorder/package-lock.json b/alfred-web-session-recorder/package-lock.json new file mode 100644 index 0000000..09837ab --- /dev/null +++ b/alfred-web-session-recorder/package-lock.json @@ -0,0 +1,125 @@ +{ + "name": "alfred-web-recording", + "version": "2.0.0-beta", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@rrweb/types": { + "version": "2.0.0-alpha.8", + "resolved": "https://registry.npmjs.org/@rrweb/types/-/types-2.0.0-alpha.8.tgz", + "integrity": "sha512-yAr6ZQrgmr7+qZU5DMGqYXnVsolC5epftmZtkOtgFD/bbvCWflNnl09M32hUjttlKCV1ohhmQGioXkCQ37IF7A==", + "requires": { + "rrweb-snapshot": "^2.0.0-alpha.8" + }, + 
"dependencies": { + "rrweb-snapshot": { + "version": "2.0.0-alpha.8", + "resolved": "https://registry.npmjs.org/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.8.tgz", + "integrity": "sha512-3Rb7c+mnDEADQ8N9qn9SDH5PzCyHlZ1cwZC932qRyt9O8kJWLM11JLYqqEyQCa2FZVQbzH2iAaCgnyM7A32p7A==" + } + } + }, + "@tsconfig/svelte": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/@tsconfig/svelte/-/svelte-1.0.13.tgz", + "integrity": "sha512-5lYJP45Xllo4yE/RUBccBT32eBlRDbqN8r1/MIvQbKxW3aFqaYPCNgm8D5V20X4ShHcwvYWNlKg3liDh1MlBoA==" + }, + "@types/css-font-loading-module": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/@types/css-font-loading-module/-/css-font-loading-module-0.0.7.tgz", + "integrity": "sha512-nl09VhutdjINdWyXxHWN/w9zlNCfr60JUqJbd24YXUuCwgeL0TpFSdElCwb6cxfB6ybE19Gjj4g0jsgkXxKv1Q==" + }, + "@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", + "dev": true + }, + "@types/react": { + "version": "17.0.58", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.58.tgz", + "integrity": "sha512-c1GzVY97P0fGxwGxhYq989j4XwlcHQoto6wQISOC2v6wm3h0PORRWJFHlkRjfGsiG3y1609WdQ+J+tKxvrEd6A==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", + "dev": true + }, + "@xstate/fsm": { + "version": "1.6.5", + "resolved": "https://registry.npmjs.org/@xstate/fsm/-/fsm-1.6.5.tgz", + "integrity": "sha512-b5o1I6aLNeYlU/3CPlj/Z91ybk1gUsKT+5NAJI+2W4UjvS5KLG28K9v5UvNoFVjHV8PajVZ00RH3vnjyQO7ZAw==" + }, + "base64-arraybuffer": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", + "integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==" + }, + "csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", + "dev": true + }, + "fflate": { + "version": "0.4.8", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.4.8.tgz", + "integrity": "sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==" + }, + "mitt": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.0.tgz", + "integrity": "sha512-7dX2/10ITVyqh4aOSVI9gdape+t9l2/8QxHrFmUXu4EEUpdlxl6RudZUPZoc+zuY2hk1j7XxVroIVIan/pD/SQ==" + }, + "rrdom": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/rrdom/-/rrdom-0.1.7.tgz", + "integrity": "sha512-ZLd8f14z9pUy2Hk9y636cNv5Y2BMnNEY99wxzW9tD2BLDfe1xFxtLjB4q/xCBYo6HRe0wofzKzjm4JojmpBfFw==", + "requires": { + "rrweb-snapshot": "^2.0.0-alpha.4" + } + }, + "rrweb": { + "version": "2.0.0-alpha.4", + "resolved": "https://registry.npmjs.org/rrweb/-/rrweb-2.0.0-alpha.4.tgz", + "integrity": "sha512-wEHUILbxDPcNwkM3m4qgPgXAiBJyqCbbOHyVoNEVBJzHszWEFYyTbrZqUdeb1EfmTRC2PsumCIkVcomJ/xcOzA==", + "requires": { + "@rrweb/types": "^2.0.0-alpha.4", + "@types/css-font-loading-module": "0.0.7", + "@xstate/fsm": "^1.4.0", + "base64-arraybuffer": "^1.0.1", + "fflate": "^0.4.4", + "mitt": "^3.0.0", + "rrdom": "^0.1.7", + "rrweb-snapshot": "^2.0.0-alpha.4" + } + }, + "rrweb-player": { + "version": "1.0.0-alpha.4", + "resolved": "https://registry.npmjs.org/rrweb-player/-/rrweb-player-1.0.0-alpha.4.tgz", + "integrity": "sha512-Wlmn9GZ5Fdqa37vd3TzsYdLl/JWEvXNUrLCrYpnOwEgmY409HwVIvvA5aIo7k582LoKgdRCsB87N+f0oWAR0Kg==", + "requires": { + "@tsconfig/svelte": "^1.0.0", + "rrweb": 
"^2.0.0-alpha.4" + } + }, + "rrweb-snapshot": { + "version": "2.0.0-alpha.4", + "resolved": "https://registry.npmjs.org/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.4.tgz", + "integrity": "sha512-KQ2OtPpXO5jLYqg1OnXS/Hf+EzqnZyP5A+XPqBCjYpj3XIje/Od4gdUwjbFo3cVuWq5Cw5Y1d3/xwgIS7/XpQQ==" + }, + "typescript": { + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", + "dev": true + } + } +} diff --git a/alfred-web-session-recorder/package.json b/alfred-web-session-recorder/package.json new file mode 100644 index 0000000..6160728 --- /dev/null +++ b/alfred-web-session-recorder/package.json @@ -0,0 +1,41 @@ +{ + "name": "@navi/alfred-session-recorder", + "version": "3.0.2-test", + "description": "session-recorder", + "main": "dist/src/index.js", + "type": "commonjs", + "scripts": { + "watch": "tsc --w -noEmit", + "watch-build": "tsc --w", + "build": "tsc", + "prepublishOnly": "npm run build", + "verdaccio": "verdaccio --config ./verdaccio.yml" + }, + "files": [ + "dist/" + ], + "types": "dist/src/index.d.ts", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "devDependencies": { + "@types/pako": "^2.0.0", + "@types/react": "^17.0.18", + "@types/uuid": "^9.0.1", + "prettier": "^3.2.4", + "typescript": "^4.8.3", + "verdaccio": "^5.26.3" + }, + "dependencies": { + "@types/ua-parser-js": "^0.7.39", + "axios": "^1.7.9", + "dayjs": "^1.11.13", + "pako": "^2.1.0", + "rrweb": "^2.0.0-alpha.4", + "uuid": "^9.0.0" + }, + "keywords": [ + "alfred-session-recorder" + ] +} diff --git a/alfred-web-session-recorder/src/constants/index.ts b/alfred-web-session-recorder/src/constants/index.ts new file mode 100644 index 0000000..934975e --- /dev/null +++ b/alfred-web-session-recorder/src/constants/index.ts @@ -0,0 +1,19 @@ +export const DEFAULT_INGEST_EVENT_INTERVAL = 2500; + +export const 
MAX_VIDEO_LENGTH = 60 * 1000 * 5; // 5 minutes + +export const RR_SAMPLING_CONFIG = { + mousemove: true, + mouseInteraction: true, + scroll: 150, + media: 800, + input: "last", +}; + +export const CS_EVENTS = { + INGEST_FAILURE: "INGEST_FAILURE", + GLOBAL_AXIOS_RESP_ERROR: "GLOBAL_AXIOS_RESP_ERROR", + GLOBAL_AXIOS_REQ_ERROR: "GLOBAL_AXIOS_REQ_ERROR", + S3_UPLOAD_FAILURE: "S3_UPLOAD_FAILURE", + PRE_SIGN_FETCH_FAILURE: "PRE_SIGN_FETCH_FAILURE", +}; diff --git a/alfred-web-session-recorder/src/index.ts b/alfred-web-session-recorder/src/index.ts new file mode 100644 index 0000000..960afd9 --- /dev/null +++ b/alfred-web-session-recorder/src/index.ts @@ -0,0 +1,201 @@ +import { record, pack } from "rrweb"; +import { v4 as uuidv4 } from "uuid"; +import { AxiosError } from "axios"; + +import { + CS_EVENTS, + DEFAULT_INGEST_EVENT_INTERVAL, + MAX_VIDEO_LENGTH, + RR_SAMPLING_CONFIG, +} from "./constants"; + +import { + getAlfredPreSignUrl, + getBrowserInfo, + getErrorObjForClickStream, + getEventIngestApiUrl, + getPackageVersion, +} from "./utils"; +import canUseDOM from "./utils/canUseDom"; +import { postClickStreamEvent } from "./utils/analytics"; +import ApiHelper from "./utils/apiHelper"; + +import { OptionType, EventsDTO, LogAnalyticsEvent, EventPayloadType } from "./types"; +import { DateHelper } from "./utils/dateHelper"; + +const browserInfo = getBrowserInfo(); + +class SnapshotRecorder { + private events: Record>; + private sessionId: string; + private options: any; + private stopFn: any; + private sendEventsTimer: any; + private sessionTimer: any; + private isRecording: boolean; + private startTimeMap: Record; + + constructor(options: OptionType) { + this.events = {}; + this.startTimeMap = {}; + this.sessionId = ""; + this.sendEventsTimer = null; + this.options = options; + this.isRecording = false; + this.sessionTimer = null; + if (canUseDOM) window.analyticsData = options.analyticsData; + window.alfredClientKey = options.clientkey; + } + + private startNewSession 
= () => { + this.sessionId = uuidv4(); + this.startTimeMap[this.sessionId] = DateHelper.now(); + if (canUseDOM) { + window.alfredSessionId = this.sessionId; + this.options?.onSessionIdChange?.(this.sessionId); + } + this.events[this.sessionId] = []; + }; + + private logAnalytics = (event: LogAnalyticsEvent): void => { + const uri = this.options?.analyticsData?.uri; + const source = this.options?.analyticsData?.source; + const { event_name, attributes } = event; + + if (uri && source) + postClickStreamEvent({ + uri, + source, + event_name, + attributes: { session_id: this.sessionId, ...(attributes || {}) }, + }); + }; + + private getEventBaseAttributes = () => { + return { + ...browserInfo, + session_id: this.sessionId, + client_timestamp: DateHelper.now(), + project_name: this.options?.projectName, + device_id: this.options?.deviceId, + metadata: { + ...this.options?.metaData, + page_url: window?.location?.origin || "-", + package_version: getPackageVersion(), + ...(window?.alfredSessionMetaData || {}), + }, + user_email: this.options?.userEmail, + version: 2, + }; + }; + + private getPresidedUrl = async () => { + return ApiHelper.GET(getAlfredPreSignUrl(this.options?.apiUrl, this.sessionId)); + }; + + private getEventPayload = (): EventPayloadType | undefined => { + const domData = [...(this.events?.[this.sessionId] || [])]; + this.events[this.sessionId] = []; + + const eventData: EventsDTO = { + base_attributes: { ...this.getEventBaseAttributes() }, + session_attribute: { + event_id: uuidv4(), + start_timestamp: this.startTimeMap?.[this.sessionId] || DateHelper.now(), + end_timestamp: DateHelper.now(), + }, + }; + + return { + eventData, + rrwebData: { event_timestamp: DateHelper.now(), dom_events_data: domData }, + }; + }; + + // Ingest event data + private ingestEvents = (eventData: EventsDTO): void => { + const WEB_SESSION_URL = getEventIngestApiUrl(this.options?.apiUrl); + navigator?.sendBeacon?.(WEB_SESSION_URL, JSON.stringify(eventData)); + }; + + private 
uploadSessionDataToS3 = () => { + const { eventData, rrwebData } = this.getEventPayload() || {}; + const { dom_events_data } = rrwebData || {}; + + if (!dom_events_data?.length) return; + this.getPresidedUrl() + .then((resp) => { + const preSignUrl = resp?.data?.data; + ApiHelper.PUT(preSignUrl, rrwebData) + .then((response) => { + const isErr = response.status !== 200; + if (isErr) throw new Error(`Error uploading to S3: ${response.status}`); + }) + .catch((error: AxiosError) => { + this.logAnalytics({ + event_name: CS_EVENTS.S3_UPLOAD_FAILURE, + attributes: { ...getErrorObjForClickStream(error), session_id: this.sessionId }, + }); + }); + }) + .catch((err: AxiosError) => { + this.logAnalytics({ + event_name: CS_EVENTS.PRE_SIGN_FETCH_FAILURE, + attributes: { message: err, session_id: this.sessionId }, + }); + }); + + if (eventData) this.ingestEvents(eventData); + }; + + private resetSessionTimer = () => { + this.sessionTimer = setInterval(() => { + this.stopRecording(); + this.startRecording(); + }, MAX_VIDEO_LENGTH); + }; + + private startRecording = () => { + if (this.isRecording) return; + + this.startNewSession(); + this.isRecording = true; + this.stopFn = null; + + this.sendEventsTimer = setInterval(() => { + this.uploadSessionDataToS3(); + }, this?.options?.ingestEventInterval || DEFAULT_INGEST_EVENT_INTERVAL); + + this.stopFn = record({ + emit: (event: any) => { + this.events[this.sessionId].push(event); + }, + sampling: { ...RR_SAMPLING_CONFIG }, + packFn: pack, + ...this.options?.sessionConfig, + }); + + this.resetSessionTimer(); + }; + + public stopRecording = () => { + this.uploadSessionDataToS3(); + this.stopFn(); + + // Clean up + if (this.sendEventsTimer) clearInterval(this.sendEventsTimer); + if (this.sessionTimer) clearInterval(this.sessionTimer); + this.sendEventsTimer = null; + this.sessionTimer = null; + this.stopFn = null; + this.isRecording = false; + }; + + public startSnapshotRecording = () => { + this.startRecording(); + // Cleanup function 
+ return () => {}; + }; +} + +export default SnapshotRecorder; diff --git a/alfred-web-session-recorder/src/types/index.ts b/alfred-web-session-recorder/src/types/index.ts new file mode 100644 index 0000000..a820103 --- /dev/null +++ b/alfred-web-session-recorder/src/types/index.ts @@ -0,0 +1,69 @@ +import { DeviceDetailsType } from "../utils"; + +export interface BrowserMetaInfo extends DeviceDetailsType { + browser: string; + screen_resolution: string; +} + +export interface OptionType { + apiUrl?: string; + projectName: string; + deviceId?: string; + userEmail?: string; + metaData?: any; + sessionConfig?: any; + ingestEventInterval?: number; + clientkey: string; + onSessionIdChange?: (sessionId: string) => void; + analyticsData?: { + uri: string; + source: string; + }; +} + +export interface BaseAttributesDTO extends BrowserMetaInfo { + session_id: string; + client_timestamp: number; + device_id?: string; + project_name: string; + user_email: string | undefined; + metadata: any; + version: number; +} + +export interface EventsDTO { + base_attributes: BaseAttributesDTO; + session_attribute: { + event_id: string; + start_timestamp: number; + end_timestamp: number; + }; +} + +export interface EventPayloadType { + eventData: EventsDTO; + rrwebData: { dom_events_data: any[]; event_timestamp: number }; +} + +export interface S3EventDTO { + event_timestamp: string; + dom_events_data: Array; +} + +declare global { + interface Window { + alfredSessionId: string; + alfredSessionMetaData: Record; + alfredClientKey: string; + analyticsData?: { + uri: string; + source: string; + }; + onSessionIdChange?: (sessionId: string) => void; + } +} + +export interface LogAnalyticsEvent { + event_name: string; + attributes?: Record; +} diff --git a/alfred-web-session-recorder/src/utils/analytics.ts b/alfred-web-session-recorder/src/utils/analytics.ts new file mode 100644 index 0000000..7276490 --- /dev/null +++ b/alfred-web-session-recorder/src/utils/analytics.ts @@ -0,0 +1,31 @@ 
+import { getBrowserInfo, getPackageVersion } from "./index"; +import { DateHelper } from "./dateHelper"; + +interface PostClickStreamEvent { + uri: string; + source: string; + event_name: string; + attributes: Record; +} + +export const postClickStreamEvent = (payload: PostClickStreamEvent): void => { + const { uri, source, event_name, attributes } = payload; + const finalBody = { + source, + client_ts: DateHelper.now(), + events: [ + { + event_name, + timestamp: DateHelper.now(), + attributes: { ...getBrowserInfo(), package_version: getPackageVersion(), ...attributes }, + }, + ], + }; + + try { + const payloadBlob = new Blob([JSON.stringify(finalBody)], { type: "application/json" }); + navigator?.sendBeacon?.(uri, payloadBlob); + } catch (err) { + console.error("Failed to send analytics events", err); + } +}; diff --git a/alfred-web-session-recorder/src/utils/apiHelper.ts b/alfred-web-session-recorder/src/utils/apiHelper.ts new file mode 100644 index 0000000..7d31c75 --- /dev/null +++ b/alfred-web-session-recorder/src/utils/apiHelper.ts @@ -0,0 +1,61 @@ +// @ts-nocheck + +import axios, { AxiosError, AxiosResponse } from "axios"; + +const axiosInstance = axios.create({}); + +// Set up interceptors if needed (optional) +axiosInstance.interceptors.request.use( + (config) => { + // @ts-ignore + config.retry = config?.retry < 5 ? 
config.retry : 4; + return config; + }, + (error: AxiosError) => { + return Promise.reject(error); + }, +); + +axiosInstance.interceptors.response.use( + (response) => { + return response; + }, + (error: AxiosError) => { + const { config } = error; + + if (!config || config.retry <= 1) return Promise.reject(error); + config.retry -= 1; + + const delayRetryRequest = new Promise((resolve) => { + setTimeout(() => { + resolve(); + }, 0); + }); + + return delayRetryRequest.then(() => axiosInstance(config)); + }, +); + +// Create an ApiHelper object +const ApiHelper = { + POST: async (url: string, data?: any): Promise> => { + return axiosInstance.post(url, data, { + headers: { "X-Api-Key": window?.alfredClientKey }, + }); + }, + GET: async (url: string): Promise> => { + return axiosInstance.get(url, { + headers: { + "X-Api-Key": window?.alfredClientKey, + "Origin": window?.location?.origin + }, + }); + }, + PUT: async (url: string, data?: any): Promise> => { + return axiosInstance.put(url, data, { + headers: { "X-Api-Key": window?.alfredClientKey }, + }); + }, +}; + +export default ApiHelper; diff --git a/alfred-web-session-recorder/src/utils/canUseDom.ts b/alfred-web-session-recorder/src/utils/canUseDom.ts new file mode 100644 index 0000000..14074dd --- /dev/null +++ b/alfred-web-session-recorder/src/utils/canUseDom.ts @@ -0,0 +1,7 @@ +const canUseDOM = !!( + typeof window !== 'undefined' && + window.document && + window.document.createElement +); + +export default canUseDOM; \ No newline at end of file diff --git a/alfred-web-session-recorder/src/utils/dateHelper.ts b/alfred-web-session-recorder/src/utils/dateHelper.ts new file mode 100644 index 0000000..7ac7211 --- /dev/null +++ b/alfred-web-session-recorder/src/utils/dateHelper.ts @@ -0,0 +1,5 @@ +import dayjs from "dayjs"; + +export const DateHelper = { + now: () => dayjs().valueOf(), // Get current time in epoch (milliseconds) +}; diff --git a/alfred-web-session-recorder/src/utils/index.ts 
b/alfred-web-session-recorder/src/utils/index.ts new file mode 100644 index 0000000..3f91452 --- /dev/null +++ b/alfred-web-session-recorder/src/utils/index.ts @@ -0,0 +1,117 @@ +import { BrowserMetaInfo } from "../types"; + +import UAParser from "ua-parser-js"; +import canUseDOM from "./canUseDom"; +import { v4 as uuidv4 } from "uuid"; +import { AxiosError } from "axios"; +import packageJson from "../../package.json"; + +const parser = new UAParser(); +const parserResults = parser.getResult(); + +export interface DeviceDetailsType { + os: string; + os_version: string; + manufacturer: string; + model: string; +} + +const DEFAULT_BROWSER_INFO = { + manufacturer: "", + model: "", + os: "", + os_version: "", + browser: "unknow", + screen_resolution: "unknow", +}; + +const JSON = ".json"; + +export type BrowserReturnType = + | "Chrome" + | "Mobile Safari" + | "Firefox" + | "Samsung Internet" + | "Brave" + | "Unknown"; + +export const getEventIngestApiUrl = (uri: string): string => { + if (!uri) return uri; + + const url = new URL(uri); + if (url.pathname && url.pathname.includes("ingest/web/sessions")) return uri; + return `${uri}/v2/ingest/web/sessions`; +}; + +export const getAlfredPreSignUrl = (uri: string, sessionId: string): string => { + const fileName = uuidv4(); + return `${uri}/ingest/web/session/pre-sign/${fileName}${JSON}?fileTypeExtension=${JSON}&directoryName=${sessionId}`; +}; + +export const getErrorObjForClickStream = (error: AxiosError) => { + return { + message: error.message, + status: error?.response?.status, + statusText: error?.response?.statusText, + url: error?.config?.url, + code: error.code, + }; +}; + +export const getDeviceDetails = (): DeviceDetailsType => { + return { + os: parserResults.os.name || "unknown", + os_version: parserResults.os.version || "unknown", + manufacturer: parserResults.device.vendor || "unknown", + model: parserResults.device.model || "unknown", + }; +}; + +export const getBrowser = (): BrowserReturnType => { + const 
userAgent = navigator.userAgent; + let browser = "unknown" as BrowserReturnType; + + if (/chrome/i.test(userAgent)) { + browser = "chrome" as BrowserReturnType; + } else if (/safari/i.test(userAgent)) { + browser = "safari" as BrowserReturnType; + } else if (/firefox/i.test(userAgent)) { + browser = "firefox" as BrowserReturnType; + } + + return browser; +}; + +export const getBrowserName = (): string => { + if (!canUseDOM) { + return "unknown"; + } + let browser: string = getBrowser(); + // navigator.brave is only added in brave browser + // eslint-disable-next-line + // @ts-ignore + if (navigator?.brave) { + browser = "brave"; + } + return browser; +}; + +export const getBrowserInfo = (): BrowserMetaInfo => { + try { + const screenWidth = typeof window !== "undefined" ? window.screen.width : 0; + const screenHeight = typeof window !== "undefined" ? window.screen.height : 0; + const screenResolution = `${screenWidth}x${screenHeight}`; + + return { + browser: getBrowserName(), + screen_resolution: screenResolution, + ...getDeviceDetails(), + }; + } catch (err) { + return DEFAULT_BROWSER_INFO; + } +}; + +export const getPackageVersion = () => { + return packageJson.version; +}; diff --git a/alfred-web-session-recorder/tsconfig.json b/alfred-web-session-recorder/tsconfig.json new file mode 100644 index 0000000..faa2e1a --- /dev/null +++ b/alfred-web-session-recorder/tsconfig.json @@ -0,0 +1,16 @@ +{ + "compilerOptions": { + "resolveJsonModule": true, + "target": "ES2015" /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', 'ES2021', or 'ESNEXT'. */, + "module": "commonjs" /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */, + "declaration": true /* Generates corresponding '.d.ts' file. */, + "sourceMap": true /* Generates corresponding '.map' file. */, + "outDir": "./dist" /* Redirect output structure to the directory. 
*/, + "rootDir": "." /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */, + "strict": true /* Enable all strict type-checking options. */, + "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */, + "inlineSources": true /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */, + "skipLibCheck": true /* Skip type checking of declaration files. */, + "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */, + }, +} diff --git a/alfred-web-session-recorder/verdaccio.yml b/alfred-web-session-recorder/verdaccio.yml new file mode 100644 index 0000000..b3604d0 --- /dev/null +++ b/alfred-web-session-recorder/verdaccio.yml @@ -0,0 +1,5 @@ +storage: ./.storage +packages: + '**': + access: $anonymous + publish: $anonymous diff --git a/alfred-web-session-recorder/yarn.lock b/alfred-web-session-recorder/yarn.lock new file mode 100644 index 0000000..9411727 --- /dev/null +++ b/alfred-web-session-recorder/yarn.lock @@ -0,0 +1,1896 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@babel/runtime@^7.15.4": + version "7.22.3" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.3.tgz#0a7fce51d43adbf0f7b517a71f4c3aaca92ebcbb" + integrity sha512-XsDuspWKLUsxwCp6r7EhsExHtYfbe5oAGQ19kqngTdCPUoPQzOPdUbD/pB9PJiwb2ptYKQDjSJT3R6dC+EPqfQ== + dependencies: + regenerator-runtime "^0.13.11" + +"@rrweb/types@^2.0.0-alpha.4": + version "2.0.0-alpha.15" + resolved "https://registry.yarnpkg.com/@rrweb/types/-/types-2.0.0-alpha.15.tgz#5d158dc1071e529cb0392fb040f57384aa1ddb89" + integrity sha512-NsD2D8oFYT+XZidklW3T/waqDyaUqu+GqBNMZRJ5UQ7hbwtlx0lmt6ZCvKa29cT/6GfEwNYxAQqelodAwRnHTw== + dependencies: + rrweb-snapshot "^2.0.0-alpha.15" + +"@types/css-font-loading-module@0.0.7": + version "0.0.7" + resolved "https://registry.yarnpkg.com/@types/css-font-loading-module/-/css-font-loading-module-0.0.7.tgz#2f98ede46acc0975de85c0b7b0ebe06041d24601" + integrity sha512-nl09VhutdjINdWyXxHWN/w9zlNCfr60JUqJbd24YXUuCwgeL0TpFSdElCwb6cxfB6ybE19Gjj4g0jsgkXxKv1Q== + +"@types/lodash@^4.14.175": + version "4.14.195" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.195.tgz#bafc975b252eb6cea78882ce8a7b6bf22a6de632" + integrity sha512-Hwx9EUgdwf2GLarOjQp5ZH8ZmblzcbTBC2wtQWNKARBSxM9ezRIAUpeDTgoQRAFB0+8CNWXVA9+MaSOzOF3nPg== + +"@types/pako@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@types/pako/-/pako-2.0.0.tgz#12ab4c19107528452e73ac99132c875ccd43bdfb" + integrity sha512-10+iaz93qR5WYxTo+PMifD5TSxiOtdRaxBf7INGGXMQgTCu8Z/7GYWYFUOS3q/G0nE5boj1r4FEB+WSy7s5gbA== + +"@types/prop-types@*": + version "15.7.5" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf" + integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== + +"@types/react@^17.0.18": + version "17.0.58" + resolved "https://registry.yarnpkg.com/@types/react/-/react-17.0.58.tgz#c8bbc82114e5c29001548ebe8ed6c4ba4d3c9fb0" + 
integrity sha512-c1GzVY97P0fGxwGxhYq989j4XwlcHQoto6wQISOC2v6wm3h0PORRWJFHlkRjfGsiG3y1609WdQ+J+tKxvrEd6A== + dependencies: + "@types/prop-types" "*" + "@types/scheduler" "*" + csstype "^3.0.2" + +"@types/scheduler@*": + version "0.16.3" + resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.3.tgz#cef09e3ec9af1d63d2a6cc5b383a737e24e6dcf5" + integrity sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ== + +"@types/ua-parser-js@^0.7.39": + version "0.7.39" + resolved "https://registry.yarnpkg.com/@types/ua-parser-js/-/ua-parser-js-0.7.39.tgz#832c58e460c9435e4e34bb866e85e9146e12cdbb" + integrity sha512-P/oDfpofrdtF5xw433SPALpdSchtJmY7nsJItf8h3KXqOslkbySh8zq4dSWXH2oTjRvJ5PczVEoCZPow6GicLg== + +"@types/uuid@^9.0.1": + version "9.0.1" + resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.1.tgz#98586dc36aee8dacc98cc396dbca8d0429647aa6" + integrity sha512-rFT3ak0/2trgvp4yYZo5iKFEPsET7vKydKF+VRCxlQ9bpheehyAJH89dAkaLEq/j/RZXJIqcgsmPJKUP1Z28HA== + +"@verdaccio/commons-api@10.2.0": + version "10.2.0" + resolved "https://registry.yarnpkg.com/@verdaccio/commons-api/-/commons-api-10.2.0.tgz#3b684c31749837b0574375bb2e10644ecea9fcca" + integrity sha512-F/YZANu4DmpcEV0jronzI7v2fGVWkQ5Mwi+bVmV+ACJ+EzR0c9Jbhtbe5QyLUuzR97t8R5E/Xe53O0cc2LukdQ== + dependencies: + http-errors "2.0.0" + http-status-codes "2.2.0" + +"@verdaccio/config@7.0.0-next.2": + version "7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/config/-/config-7.0.0-next.2.tgz#fd80f66e36edf477bc7fc0b4a0cf05350b4c47b3" + integrity sha512-wpeuvhuvAhJx70h47Xl1nQbz11SuOj5aSLoRL0H30t5Q6fzVKDeCR2umv1lSolGMItWNbm08ctSfxFEMLSld3g== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/utils" "7.0.0-next.2" + debug "4.3.4" + js-yaml "4.1.0" + lodash "4.17.21" + minimatch "3.1.2" + yup "0.32.11" + +"@verdaccio/core@7.0.0-next.2": + version "7.0.0-next.2" + resolved 
"https://registry.yarnpkg.com/@verdaccio/core/-/core-7.0.0-next.2.tgz#6d31d26bfbd5f1b9259cdd4e948225c5b3624a95" + integrity sha512-jv78gxKusJZaNlGH5JFeJh9GCpP+O0E+ebkEs9T9/7xeo7LsPUBDNXUt91LCPHK6iuLwZHIRM2LBM39tolag3w== + dependencies: + ajv "8.12.0" + core-js "3.30.2" + http-errors "2.0.0" + http-status-codes "2.2.0" + process-warning "1.0.0" + semver "7.5.4" + +"@verdaccio/file-locking@10.3.1": + version "10.3.1" + resolved "https://registry.yarnpkg.com/@verdaccio/file-locking/-/file-locking-10.3.1.tgz#cfc2436e0715954e0965f97dfcd87381d116f749" + integrity sha512-oqYLfv3Yg3mAgw9qhASBpjD50osj2AX4IwbkUtyuhhKGyoFU9eZdrbeW6tpnqUnj6yBMtAPm2eGD4BwQuX400g== + dependencies: + lockfile "1.0.4" + +"@verdaccio/file-locking@12.0.0-next.0": + version "12.0.0-next.0" + resolved "https://registry.yarnpkg.com/@verdaccio/file-locking/-/file-locking-12.0.0-next.0.tgz#9e40e675fda081cc65294e39a7aee9d1079ab90c" + integrity sha512-SsjciD/2UpMsUJsEAB4se6gWLnx7JCopdSVGXLvvlKDzHi+y/zQOz0gq2QVirzJ4C+gSRdYd8ts19MOuL1yHgA== + dependencies: + lockfile "1.0.4" + +"@verdaccio/local-storage@10.3.3": + version "10.3.3" + resolved "https://registry.yarnpkg.com/@verdaccio/local-storage/-/local-storage-10.3.3.tgz#fc31eea9e3da2f27e0cfaf5fe713834ed1fab9e9" + integrity sha512-/n0FH+1hxVg80YhYBfJuW7F2AuvLY2fra8/DTCilWDll9Y5yZDxwntZfcKHJLerCA4atrbJtvaqpWkoV3Q9x8w== + dependencies: + "@verdaccio/commons-api" "10.2.0" + "@verdaccio/file-locking" "10.3.1" + "@verdaccio/streams" "10.2.1" + async "3.2.4" + debug "4.3.4" + lodash "4.17.21" + lowdb "1.0.0" + mkdirp "1.0.4" + +"@verdaccio/logger-7@7.0.0-next.2": + version "7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/logger-7/-/logger-7-7.0.0-next.2.tgz#c8919b038bedaf464f1e93e907c6f97518686e84" + integrity sha512-Hm4c/w+vO0+rjs6DoWc48RykEsrKEA55JdTZkxw57/mk7owkCovL5WEQ2vx4F37afHT4fHZn+R6BjWD3o6HtGA== + dependencies: + "@verdaccio/logger-commons" "7.0.0-next.2" + pino "7.11.0" + +"@verdaccio/logger-commons@7.0.0-next.2": + version 
"7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/logger-commons/-/logger-commons-7.0.0-next.2.tgz#8c6ffc811885c4a474791facb274850edf94bc43" + integrity sha512-BqWnpWHPmUc0tqfzskOoOB0KIfFIRiBYNP+aJ3NeEEKE3DnVcwm3pe5cGvj43lJOxJZqQIOVxNHCoJhFh+TNrA== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/logger-prettify" "7.0.0-next.0" + colorette "2.0.20" + debug "4.3.4" + +"@verdaccio/logger-prettify@7.0.0-next.0": + version "7.0.0-next.0" + resolved "https://registry.yarnpkg.com/@verdaccio/logger-prettify/-/logger-prettify-7.0.0-next.0.tgz#9d120b34ff47a5e34f1bb007d1f1e591ff1db596" + integrity sha512-6akvpkzt6ipkk7v3Non0M9KZq7xYF51QMhJPTFA7JU+hW5AqrAnoMsNAWOzoUMvRr/2Ri33U3VWAOwhmf109Pw== + dependencies: + colorette "2.0.20" + dayjs "1.11.7" + lodash "4.17.21" + pino-abstract-transport "1.0.0" + sonic-boom "3.3.0" + +"@verdaccio/middleware@7.0.0-next.2": + version "7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/middleware/-/middleware-7.0.0-next.2.tgz#26d362038b9a19e826f0837a7082cd3128e5317c" + integrity sha512-04Gi5jZAqvBg/Q20nJcgczg8SV4bEvb95X90xHWruzaP4xS0lBelsJwrsc/OzfqAT7iCh4Jn8QDCpdkWzd/Fxw== + dependencies: + "@verdaccio/config" "7.0.0-next.2" + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/url" "12.0.0-next.2" + "@verdaccio/utils" "7.0.0-next.2" + debug "4.3.4" + express "4.18.2" + express-rate-limit "5.5.1" + lodash "4.17.21" + lru-cache "7.18.3" + mime "2.6.0" + +"@verdaccio/search@7.0.0-next.1": + version "7.0.0-next.1" + resolved "https://registry.yarnpkg.com/@verdaccio/search/-/search-7.0.0-next.1.tgz#6f9ca46262f5b0199c88821883197bb6cd62f0c4" + integrity sha512-LoWi4YVTFTbjEtyAPOfLKZy+neR5ldBzcVWgQJvg9e8fXS+UhQglvu6YWDr2j1yrQqbzzDVfV7YlXf4a3GG6mw== + +"@verdaccio/signature@7.0.0-next.0": + version "7.0.0-next.0" + resolved "https://registry.yarnpkg.com/@verdaccio/signature/-/signature-7.0.0-next.0.tgz#9507b6787c44f1c2f27cf608d12621ea7610d61b" + integrity 
sha512-9e28xxd/eH1qRd+I+U0QO0af7F+MEFMtcrRapcqYIayk8yGq03cEGoj18LIf+LXyAosu18Y5dTporPz/R6geHg== + dependencies: + debug "4.3.4" + jsonwebtoken "9.0.0" + lodash "4.17.21" + +"@verdaccio/streams@10.2.1": + version "10.2.1" + resolved "https://registry.yarnpkg.com/@verdaccio/streams/-/streams-10.2.1.tgz#9443d24d4f17672b8f8c8e147690557918ed2bcb" + integrity sha512-OojIG/f7UYKxC4dYX8x5ax8QhRx1b8OYUAMz82rUottCuzrssX/4nn5QE7Ank0DUSX3C9l/HPthc4d9uKRJqJQ== + +"@verdaccio/tarball@12.0.0-next.2": + version "12.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/tarball/-/tarball-12.0.0-next.2.tgz#578c13148a71b6884f792f5bd3f30ebb674f2688" + integrity sha512-b+pODfSrXEDBFmYwSiV5QXaiDMWpwwlXbwvztYiGr1T0Sqj443vPQ71N+yEvzDbHxnZjA5DUKmfMbWQ+gRCpUw== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/url" "12.0.0-next.2" + "@verdaccio/utils" "7.0.0-next.2" + debug "4.3.4" + lodash "4.17.21" + +"@verdaccio/ui-theme@7.0.0-next.2": + version "7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/ui-theme/-/ui-theme-7.0.0-next.2.tgz#59ac53b1c1a0a2833d9b9b694fe33e302511e54f" + integrity sha512-gVXw2DhjRCeJLr6zEARzKMHz/9gqUXQA72tkvnBNKjk+v+jBqaZGF74wm9GTABmllSSkLu8Ki/jdEP3YaPNs5w== + +"@verdaccio/url@12.0.0-next.2": + version "12.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/url/-/url-12.0.0-next.2.tgz#c85fe60071881d495aa36d31eb9599180f5426c3" + integrity sha512-1AMBouDosM+LynbF5DjWJKolYzFpmmy2e/Vm3IzmHPS1ecBZ8T4rpaxXbGQng2uzbdeitncwWhYj1UdgIr2zng== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + debug "4.3.4" + lodash "4.17.21" + validator "13.9.0" + +"@verdaccio/utils@7.0.0-next.2": + version "7.0.0-next.2" + resolved "https://registry.yarnpkg.com/@verdaccio/utils/-/utils-7.0.0-next.2.tgz#13ee93ffa56fcd209f111f543e3d0af3cf341c07" + integrity sha512-ZAMu6uYQ8zZ6o+kceh8O5AdDjFfNEvLl7IMK8GsSviVfm4DJIgOwu7IFbkpnEFyzGvSAsmHSx5S2kidNgHwYrQ== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + lodash "4.17.21" + minimatch "3.1.2" + 
semver "7.5.4" + +"@xstate/fsm@^1.4.0": + version "1.6.5" + resolved "https://registry.yarnpkg.com/@xstate/fsm/-/fsm-1.6.5.tgz#f599e301997ad7e3c572a0b1ff0696898081bea5" + integrity sha512-b5o1I6aLNeYlU/3CPlj/Z91ybk1gUsKT+5NAJI+2W4UjvS5KLG28K9v5UvNoFVjHV8PajVZ00RH3vnjyQO7ZAw== + +JSONStream@1.3.5: + version "1.3.5" + resolved "https://registry.yarnpkg.com/JSONStream/-/JSONStream-1.3.5.tgz#3208c1f08d3a4d99261ab64f92302bc15e111ca0" + integrity sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ== + dependencies: + jsonparse "^1.2.0" + through ">=2.2.7 <3" + +abort-controller@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" + integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg== + dependencies: + event-target-shim "^5.0.0" + +accepts@~1.3.5, accepts@~1.3.8: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== + dependencies: + mime-types "~2.1.34" + negotiator "0.6.3" + +agent-base@6: + version "6.0.2" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-6.0.2.tgz#49fff58577cfee3f37176feab4c22e00f86d7f77" + integrity sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ== + dependencies: + debug "4" + +ajv@8.12.0: + version "8.12.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.12.0.tgz#d1a0527323e22f53562c567c00991577dfbe19d1" + integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA== + dependencies: + fast-deep-equal "^3.1.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.2.2" + +ajv@^6.12.3: + version "6.12.6" + resolved 
"https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +apache-md5@1.1.8: + version "1.1.8" + resolved "https://registry.yarnpkg.com/apache-md5/-/apache-md5-1.1.8.tgz#ea79c6feb03abfed42b2830dde06f75df5e3bbd9" + integrity sha512-FCAJojipPn0bXjuEpjOOOMN8FZDkxfWWp4JGN9mifU2IhxvKyXZYqpzPHdnTSUpmPDy+tsslB6Z1g+Vg6nVbYA== + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== + +asn1@~0.2.3: + version "0.2.6" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d" + integrity sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ== + dependencies: + safer-buffer "~2.1.0" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== + +async@3.2.4: + version "3.2.4" + resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" + integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== + +asynckit@^0.4.0: + version "0.4.0" + resolved 
"https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== + +atomic-sleep@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/atomic-sleep/-/atomic-sleep-1.0.0.tgz#eb85b77a601fc932cfe432c5acd364a9e2c9075b" + integrity sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ== + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== + +aws4@^1.8.0: + version "1.12.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.12.0.tgz#ce1c9d143389679e253b314241ea9aa5cec980d3" + integrity sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg== + +axios@^1.7.9: + version "1.7.9" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.9.tgz#d7d071380c132a24accda1b2cfc1535b79ec650a" + integrity sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw== + dependencies: + follow-redirects "^1.15.6" + form-data "^4.0.0" + proxy-from-env "^1.1.0" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base64-arraybuffer@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz#1c37589a7c4b0746e34bd1feb951da2df01c1bdc" + integrity sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ== + +base64-js@^1.3.1: + version "1.5.1" + resolved 
"https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + integrity sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w== + dependencies: + tweetnacl "^0.14.3" + +bcryptjs@2.4.3: + version "2.4.3" + resolved "https://registry.yarnpkg.com/bcryptjs/-/bcryptjs-2.4.3.tgz#9ab5627b93e60621ff7cdac5da9733027df1d0cb" + integrity sha512-V/Hy/X9Vt7f3BbPJEi8BdVFMByHi+jNXrYkW3huaybV/kQ0KJg0Y6PkEMbn+zeT+i+SiKZ/HMqJGIIt4LZDqNQ== + +body-parser@1.20.1: + version "1.20.1" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" + integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== + dependencies: + bytes "3.1.2" + content-type "~1.0.4" + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + http-errors "2.0.0" + iconv-lite "0.4.24" + on-finished "2.4.1" + qs "6.11.0" + raw-body "2.5.1" + type-is "~1.6.18" + unpipe "1.0.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +buffer-equal-constant-time@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz#f8e71132f7ffe6e01a5c9697a4c6f3e48d5cc819" + integrity sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA== + +buffer@^6.0.3: + version "6.0.3" + resolved 
"https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" + integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.2.1" + +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + integrity sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw== + +bytes@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== + +call-bind@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" + integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== + dependencies: + function-bind "^1.1.1" + get-intrinsic "^1.0.2" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== + +clipanion@3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/clipanion/-/clipanion-3.2.1.tgz#2887db4cb232e80ba57cf19347a4e3a1c4a74133" + integrity sha512-dYFdjLb7y1ajfxQopN05mylEpK9ZX0sO1/RfMXdfmwjlIsPkbh4p7A682x++zFPLDCo1x3p82dtljHf5cW2LKA== + dependencies: + typanion "^3.8.0" + +colorette@2.0.20: + version "2.0.20" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.20.tgz#9eb793e6833067f7235902fcd3b09917a000a95a" + integrity sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w== + +combined-stream@^1.0.6, combined-stream@^1.0.8, combined-stream@~1.0.6: + version 
"1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +compressible@~2.0.16: + version "2.0.18" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba" + integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg== + dependencies: + mime-db ">= 1.43.0 < 2" + +compression@1.7.4: + version "1.7.4" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" + integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ== + dependencies: + accepts "~1.3.5" + bytes "3.0.0" + compressible "~2.0.16" + debug "2.6.9" + on-headers "~1.0.2" + safe-buffer "5.1.2" + vary "~1.1.2" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +content-disposition@0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe" + integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ== + dependencies: + safe-buffer "5.2.1" + +content-type@~1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" + integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== + +cookie-signature@1.0.6: + version "1.0.6" + resolved 
"https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== + +cookie@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" + integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== + +cookies@0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/cookies/-/cookies-0.8.0.tgz#1293ce4b391740a8406e3c9870e828c4b54f3f90" + integrity sha512-8aPsApQfebXnuI+537McwYsDtjVxGm8gTIzQI3FDW6t5t/DAhERxtnbEPN/8RX+uZthoz4eCOgloXaE5cYyNow== + dependencies: + depd "~2.0.0" + keygrip "~1.1.0" + +core-js@3.30.2: + version "3.30.2" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.30.2.tgz#6528abfda65e5ad728143ea23f7a14f0dcf503fc" + integrity sha512-uBJiDmwqsbJCWHAwjrx3cvjbMXP7xD72Dmsn5LOJpiRmE3WbBbN5rCqQ2Qh6Ek6/eOrjlWngEynBWo4VxerQhg== + +core-util-is@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== + +cors@2.8.5: + version "2.8.5" + resolved "https://registry.yarnpkg.com/cors/-/cors-2.8.5.tgz#eac11da51592dd86b9f06f6e7ac293b3df875d29" + integrity sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g== + dependencies: + object-assign "^4" + vary "^1" + +csstype@^3.0.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b" + integrity sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ== + +dashdash@^1.12.0: + version "1.14.1" + resolved 
"https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + integrity sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g== + dependencies: + assert-plus "^1.0.0" + +dayjs@1.11.7: + version "1.11.7" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.7.tgz#4b296922642f70999544d1144a2c25730fce63e2" + integrity sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ== + +dayjs@^1.11.13: + version "1.11.13" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" + integrity sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg== + +debug@2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@4, debug@4.3.4, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== + dependencies: + ms "2.1.2" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== + +depd@2.0.0, depd@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + +destroy@1.2.0: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== + +duplexify@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-4.1.2.tgz#18b4f8d28289132fa0b9573c898d9f903f81c7b0" + integrity sha512-fz3OjcNCHmRP12MJoZMPglx8m4rrFP8rovnk4vT8Fs+aonZoCwGg10dSsQsfP/E62eZcPTMSMP6686fu9Qlqtw== + dependencies: + end-of-stream "^1.4.1" + inherits "^2.0.3" + readable-stream "^3.1.1" + stream-shift "^1.0.0" + +ecc-jsbn@~0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + integrity sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw== + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +ecdsa-sig-formatter@1.0.11: + version "1.0.11" + resolved "https://registry.yarnpkg.com/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz#ae0f0fa2d85045ef14a817daa3ce9acd0489e5bf" + integrity sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ== + dependencies: + safe-buffer "^5.0.1" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== + +end-of-stream@^1.4.1: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity 
sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +envinfo@7.10.0: + version "7.10.0" + resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.10.0.tgz#55146e3909cc5fe63c22da63fb15b05aeac35b13" + integrity sha512-ZtUjZO6l5mwTHvc1L9+1q5p/R3wTopcfqMW8r5t8SJSKqeVI/LtajORwRFEKpEFuekjD0VBjwu1HMxL4UalIRw== + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== + +event-target-shim@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + +events@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + +express-rate-limit@5.5.1: + version "5.5.1" + resolved "https://registry.yarnpkg.com/express-rate-limit/-/express-rate-limit-5.5.1.tgz#110c23f6a65dfa96ab468eda95e71697bc6987a2" + integrity sha512-MTjE2eIbHv5DyfuFz4zLYWxpqVhEhkTiwFGuB74Q9CSou2WHO52nlE5y3Zlg6SIsiYUIPj6ifFxnkPz6O3sIUg== + +express@4.18.2: + version "4.18.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" + integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== + 
dependencies: + accepts "~1.3.8" + array-flatten "1.1.1" + body-parser "1.20.1" + content-disposition "0.5.4" + content-type "~1.0.4" + cookie "0.5.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "2.0.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.2.0" + fresh "0.5.2" + http-errors "2.0.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "2.4.1" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.7" + qs "6.11.0" + range-parser "~1.2.1" + safe-buffer "5.2.1" + send "0.18.0" + serve-static "1.15.0" + setprototypeof "1.2.0" + statuses "2.0.1" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + +extend@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g== + +extsprintf@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.1.tgz#8d172c064867f235c0c84a596806d279bf4bcc07" + integrity sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA== + +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity 
sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-redact@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/fast-redact/-/fast-redact-3.2.0.tgz#b1e2d39bc731376d28bde844454fa23e26919987" + integrity sha512-zaTadChr+NekyzallAMXATXLOR8MNx3zqpZ0MUF2aGf4EathnG0f32VLODNlY8IuGY3HoRO2L6/6fSzNsLaHIw== + +fast-safe-stringify@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz#c406a83b6e70d9e35ce3b30a81141df30aeba884" + integrity sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA== + +fflate@^0.4.4: + version "0.4.8" + resolved "https://registry.yarnpkg.com/fflate/-/fflate-0.4.8.tgz#f90b82aefbd8ac174213abb338bd7ef848f0f5ae" + integrity sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA== + +finalhandler@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" + integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "2.4.1" + parseurl "~1.3.3" + statuses "2.0.1" + unpipe "~1.0.0" + +follow-redirects@^1.15.6: + version "1.15.9" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1" + integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== + +form-data@^4.0.0: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/form-data/-/form-data-4.0.1.tgz#ba1076daaaa5bfd7e99c1a6cb02aa0a5cff90d48" + integrity sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types "^2.1.12" + +form-data@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" + integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +forwarded@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" + integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +get-intrinsic@^1.0.2: + version "1.2.1" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.1.tgz#d295644fed4505fc9cde952c37ee12b477a83d82" + integrity sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-proto "^1.0.1" + has-symbols "^1.0.3" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + integrity 
sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng== + dependencies: + assert-plus "^1.0.0" + +glob@^6.0.1: + version "6.0.4" + resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22" + integrity sha512-MKZeRNyYZAVVVG1oZeLaWie1uweH40m9AZwIwxyPbTSX4hHrVYSzLg0Ro5Z5R7XKkIX+Cc6oD1rqeDJnwsB8/A== + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +graceful-fs@^4.1.3: + version "4.2.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + +handlebars@4.7.8: + version "4.7.8" + resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.7.8.tgz#41c42c18b1be2365439188c77c6afae71c0cd9e9" + integrity sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ== + dependencies: + minimist "^1.2.5" + neo-async "^2.6.2" + source-map "^0.6.1" + wordwrap "^1.0.0" + optionalDependencies: + uglify-js "^3.1.4" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + integrity sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q== + +har-validator@~5.1.3: + version "5.1.5" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd" + integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w== + dependencies: + ajv "^6.12.3" + har-schema "^2.0.0" + +has-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" + integrity 
sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== + +has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" + integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== + dependencies: + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + integrity sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ== + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +http-status-codes@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/http-status-codes/-/http-status-codes-2.2.0.tgz#bb2efe63d941dfc2be18e15f703da525169622be" + integrity sha512-feERVo9iWxvnejp3SEfm/+oNG517npqL2/PIA8ORjyOZjGC7TwCRQsZylciLS64i6pJ0wRYz3rkXLRwbtFa8Ng== + +https-proxy-agent@5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6" + integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA== + dependencies: + agent-base "6" + debug "4" 
+ +iconv-lite@0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +ieee754@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +ipaddr.js@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +is-promise@^2.1.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-2.2.2.tgz#39ab959ccbf9a774cf079f7b40c7a26f763135f1" + integrity sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ== + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== + +isstream@~0.1.2: + version "0.1.2" + 
resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== + +js-yaml@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + integrity sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== + +jsonparse@^1.2.0: + version "1.3.1" + resolved 
"https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.3.1.tgz#3f4dae4a91fac315f71062f8521cc239f1366280" + integrity sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg== + +jsonwebtoken@9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz#d0faf9ba1cc3a56255fe49c0961a67e520c1926d" + integrity sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw== + dependencies: + jws "^3.2.2" + lodash "^4.17.21" + ms "^2.1.1" + semver "^7.3.8" + +jsonwebtoken@9.0.2: + version "9.0.2" + resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz#65ff91f4abef1784697d40952bb1998c504caaf3" + integrity sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ== + dependencies: + jws "^3.2.2" + lodash.includes "^4.3.0" + lodash.isboolean "^3.0.3" + lodash.isinteger "^4.0.4" + lodash.isnumber "^3.0.3" + lodash.isplainobject "^4.0.6" + lodash.isstring "^4.0.1" + lodash.once "^4.0.0" + ms "^2.1.1" + semver "^7.5.4" + +jsprim@^1.2.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.2.tgz#712c65533a15c878ba59e9ed5f0e26d5b77c5feb" + integrity sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw== + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.4.0" + verror "1.10.0" + +jwa@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jwa/-/jwa-1.4.1.tgz#743c32985cb9e98655530d53641b66c8645b039a" + integrity sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA== + dependencies: + buffer-equal-constant-time "1.0.1" + ecdsa-sig-formatter "1.0.11" + safe-buffer "^5.0.1" + +jws@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/jws/-/jws-3.2.2.tgz#001099f3639468c9414000e99995fa52fb478304" + integrity 
sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA== + dependencies: + jwa "^1.4.1" + safe-buffer "^5.0.1" + +keygrip@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/keygrip/-/keygrip-1.1.0.tgz#871b1681d5e159c62a445b0c74b615e0917e7226" + integrity sha512-iYSchDJ+liQ8iwbSI2QqsQOvqv58eJCEanyJPJi+Khyu8smkcKSFUCbPwzFcL7YVtZ6eONjqRX/38caJ7QjRAQ== + dependencies: + tsscmp "1.0.6" + +kleur@4.1.5: + version "4.1.5" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-4.1.5.tgz#95106101795f7050c6c650f350c683febddb1780" + integrity sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ== + +lockfile@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.4.tgz#07f819d25ae48f87e538e6578b6964a4981a5609" + integrity sha512-cvbTwETRfsFh4nHsL1eGWapU1XFi5Ot9E85sWAwia7Y7EgB7vfqcZhTKZ+l7hCGxSPoushMv5GKhT5PdLv03WA== + dependencies: + signal-exit "^3.0.2" + +lodash-es@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" + integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + +lodash.includes@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.includes/-/lodash.includes-4.3.0.tgz#60bb98a87cb923c68ca1e51325483314849f553f" + integrity sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w== + +lodash.isboolean@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz#6c2e171db2a257cd96802fd43b01b20d5f5870f6" + integrity sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg== + +lodash.isinteger@^4.0.4: + version "4.0.4" + resolved "https://registry.yarnpkg.com/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz#619c0af3d03f8b04c31f5882840b77b11cd68343" + 
integrity sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA== + +lodash.isnumber@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz#3ce76810c5928d03352301ac287317f11c0b1ffc" + integrity sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw== + +lodash.isplainobject@^4.0.6: + version "4.0.6" + resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz#7c526a52d89b45c45cc690b88163be0497f550cb" + integrity sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA== + +lodash.isstring@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/lodash.isstring/-/lodash.isstring-4.0.1.tgz#d527dfb5456eca7cc9bb95d5daeaf88ba54a5451" + integrity sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw== + +lodash.once@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.once/-/lodash.once-4.1.1.tgz#0dd3971213c7c56df880977d504c88fb471a97ac" + integrity sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg== + +lodash@4, lodash@4.17.21, lodash@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +lowdb@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowdb/-/lowdb-1.0.0.tgz#5243be6b22786ccce30e50c9a33eac36b20c8064" + integrity sha512-2+x8esE/Wb9SQ1F9IHaYWfsC9FIecLOPrK4g17FGEayjUWH172H6nwicRovGvSE2CPZouc2MCIqCI7h9d+GftQ== + dependencies: + graceful-fs "^4.1.3" + is-promise "^2.1.0" + lodash "4" + pify "^3.0.0" + steno "^0.4.1" + +lru-cache@7.18.3: + version "7.18.3" + resolved 
"https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.18.3.tgz#f793896e0fd0e954a59dfdd82f0773808df6aa89" + integrity sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA== + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== + +mime-db@1.52.0, "mime-db@>= 1.43.0 < 2": + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +mime@1.6.0: + version "1.6.0" + 
resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +mime@2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-2.6.0.tgz#a2a682a95cd4d0cb1d6257e28f83da7e35800367" + integrity sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg== + +mime@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-3.0.0.tgz#b374550dca3a0c18443b0c950a6a58f1931cf7a7" + integrity sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A== + +"minimatch@2 || 3", minimatch@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimist@^1.2.5, minimist@^1.2.6: + version "1.2.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== + +mitt@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/mitt/-/mitt-3.0.1.tgz#ea36cf0cc30403601ae074c8f77b7092cdab36d1" + integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw== + +mkdirp@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + +mkdirp@~0.5.1: + version "0.5.6" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" + integrity 
sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== + dependencies: + minimist "^1.2.6" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +ms@2.1.3, ms@^2.1.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +mv@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/mv/-/mv-2.1.1.tgz#ae6ce0d6f6d5e0a4f7d893798d03c1ea9559b6a2" + integrity sha512-at/ZndSy3xEGJ8i0ygALh8ru9qy7gWW1cmkaqBN29JmMlIvM//MEO9y1sk/avxuwnPcfhkejkLsuPxH81BrkSg== + dependencies: + mkdirp "~0.5.1" + ncp "~2.0.0" + rimraf "~2.4.0" + +nanoclone@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/nanoclone/-/nanoclone-0.2.1.tgz#dd4090f8f1a110d26bb32c49ed2f5b9235209ed4" + integrity sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA== + +ncp@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ncp/-/ncp-2.0.0.tgz#195a21d6c46e361d2fb1281ba38b91e9df7bdbb3" + integrity sha512-zIdGUrPRFTUELUvr3Gmc7KZ2Sw/h1PiVM0Af/oHB6zgnV1ikqSfRk+TOufi79aHYCW3NiOXmr1BP5nWbzojLaA== + +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== + +neo-async@^2.6.2: + version "2.6.2" + resolved 
"https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" + integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== + +node-fetch@cjs: + version "2.6.7" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" + integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== + dependencies: + whatwg-url "^5.0.0" + +oauth-sign@~0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" + integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== + +object-assign@^4: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +object-inspect@^1.9.0: + version "1.12.3" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.3.tgz#ba62dffd67ee256c8c086dfae69e016cd1f198b9" + integrity sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g== + +on-exit-leak-free@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/on-exit-leak-free/-/on-exit-leak-free-0.2.0.tgz#b39c9e3bf7690d890f4861558b0d7b90a442d209" + integrity sha512-dqaz3u44QbRXQooZLTUKU41ZrzYrcvLISVgbrzbyCMxpmSLJvZ3ZamIJIZ29P6OhZIkNIQKosdeM6t1LYbA9hg== + +on-finished@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== + dependencies: + ee-first "1.1.1" + +on-headers@~1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== + +once@^1.3.0, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" + +pako@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/pako/-/pako-2.1.0.tgz#266cc37f98c7d883545d11335c00fbd4062c9a86" + integrity sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug== + +parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + integrity 
sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg== + +pino-abstract-transport@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/pino-abstract-transport/-/pino-abstract-transport-1.0.0.tgz#cc0d6955fffcadb91b7b49ef220a6cc111d48bb3" + integrity sha512-c7vo5OpW4wIS42hUVcT5REsL8ZljsUfBjqV/e2sFxmFEFZiq1XLUp5EYLtuDH6PEHq9W1egWqRbnLUP5FuZmOA== + dependencies: + readable-stream "^4.0.0" + split2 "^4.0.0" + +pino-abstract-transport@v0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/pino-abstract-transport/-/pino-abstract-transport-0.5.0.tgz#4b54348d8f73713bfd14e3dc44228739aa13d9c0" + integrity sha512-+KAgmVeqXYbTtU2FScx1XS3kNyfZ5TrXY07V96QnUSFqo2gAqlvmaxH67Lj7SWazqsMabf+58ctdTcBgnOLUOQ== + dependencies: + duplexify "^4.1.2" + split2 "^4.0.0" + +pino-std-serializers@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/pino-std-serializers/-/pino-std-serializers-4.0.0.tgz#1791ccd2539c091ae49ce9993205e2cd5dbba1e2" + integrity sha512-cK0pekc1Kjy5w9V2/n+8MkZwusa6EyyxfeQCB799CQRhRt/CqYKiWs5adeu8Shve2ZNffvfC/7J64A2PJo1W/Q== + +pino@7.11.0: + version "7.11.0" + resolved "https://registry.yarnpkg.com/pino/-/pino-7.11.0.tgz#0f0ea5c4683dc91388081d44bff10c83125066f6" + integrity sha512-dMACeu63HtRLmCG8VKdy4cShCPKaYDR4youZqoSWLxl5Gu99HUw8bw75thbPv9Nip+H+QYX8o3ZJbTdVZZ2TVg== + dependencies: + atomic-sleep "^1.0.0" + fast-redact "^3.0.0" + on-exit-leak-free "^0.2.0" + pino-abstract-transport v0.5.0 + pino-std-serializers "^4.0.0" + process-warning "^1.0.0" + quick-format-unescaped "^4.0.3" + real-require "^0.1.0" + safe-stable-stringify "^2.1.0" + sonic-boom "^2.2.1" + thread-stream "^0.15.1" + +pkginfo@0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/pkginfo/-/pkginfo-0.4.1.tgz#b5418ef0439de5425fc4995042dced14fb2a84ff" + integrity sha512-8xCNE/aT/EXKenuMDZ+xTVwkT8gsoHN2z/Q29l80u0ppGEXVvsKRzNMbtKhg8LS8k1tJLAHHylf6p4VFmP6XUQ== + +prettier@^3.2.4: + version "3.2.4" + resolved 
"https://registry.yarnpkg.com/prettier/-/prettier-3.2.4.tgz#4723cadeac2ce7c9227de758e5ff9b14e075f283" + integrity sha512-FWu1oLHKCrtpO1ypU6J0SbK2d9Ckwysq6bHj/uaCP26DxrPpppCLQRGVuqAxSTvhF00AcvDRyYrLNW7ocBhFFQ== + +process-warning@1.0.0, process-warning@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/process-warning/-/process-warning-1.0.0.tgz#980a0b25dc38cd6034181be4b7726d89066b4616" + integrity sha512-du4wfLyj4yCZq1VupnVSZmRsPJsNuxoDQFdCFHLaYiEbFBD7QE0a+I4D7hOxrVnh78QE/YipFAj9lXHiXocV+Q== + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== + +property-expr@^2.0.4: + version "2.0.5" + resolved "https://registry.yarnpkg.com/property-expr/-/property-expr-2.0.5.tgz#278bdb15308ae16af3e3b9640024524f4dc02cb4" + integrity sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA== + +proxy-addr@~2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" + integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== + dependencies: + forwarded "0.2.0" + ipaddr.js "1.9.1" + +proxy-from-env@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== + +psl@^1.1.28: + version "1.9.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" + integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== + +punycode@^2.1.0, punycode@^2.1.1: + version "2.3.0" + resolved 
"https://registry.yarnpkg.com/punycode/-/punycode-2.3.0.tgz#f67fa67c94da8f4d0cfff981aee4118064199b8f" + integrity sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA== + +qs@6.11.0: + version "6.11.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" + integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== + dependencies: + side-channel "^1.0.4" + +qs@~6.5.2: + version "6.5.3" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" + integrity sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA== + +quick-format-unescaped@^4.0.3: + version "4.0.4" + resolved "https://registry.yarnpkg.com/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz#93ef6dd8d3453cbc7970dd614fad4c5954d6b5a7" + integrity sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg== + +range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +raw-body@2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" + integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== + dependencies: + bytes "3.1.2" + http-errors "2.0.0" + iconv-lite "0.4.24" + unpipe "1.0.0" + +readable-stream@^3.1.1: + version "3.6.2" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" + integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" 
+ util-deprecate "^1.0.1" + +readable-stream@^4.0.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-4.4.0.tgz#55ce132d60a988c460d75c631e9ccf6a7229b468" + integrity sha512-kDMOq0qLtxV9f/SQv522h8cxZBqNZXuXNyjyezmfAAuribMyVXziljpQ/uQhfE1XLg2/TLTW2DsnoE4VAi/krg== + dependencies: + abort-controller "^3.0.0" + buffer "^6.0.3" + events "^3.3.0" + process "^0.11.10" + +real-require@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/real-require/-/real-require-0.1.0.tgz#736ac214caa20632847b7ca8c1056a0767df9381" + integrity sha512-r/H9MzAWtrv8aSVjPCMFpDMl5q66GqtmmRkRjpHTsp4zBAa+snZyiQNlMONiUmEJcsnaw0wCauJ2GWODr/aFkg== + +regenerator-runtime@^0.13.11: + version "0.13.11" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" + integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== + +request@2.88.2: + version "2.88.2" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" + integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.8.0" + caseless "~0.12.0" + combined-stream "~1.0.6" + extend "~3.0.2" + forever-agent "~0.6.1" + form-data "~2.3.2" + har-validator "~5.1.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.19" + oauth-sign "~0.9.0" + performance-now "^2.1.0" + qs "~6.5.2" + safe-buffer "^5.1.2" + tough-cookie "~2.5.0" + tunnel-agent "^0.6.0" + uuid "^3.3.2" + +require-from-string@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + 
+rimraf@~2.4.0: + version "2.4.5" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da" + integrity sha512-J5xnxTyqaiw06JjMftq7L9ouA448dw/E7dKghkP9WpKNuwmARNNg+Gk8/u5ryb9N/Yo2+z3MCwuqFK/+qPOPfQ== + dependencies: + glob "^6.0.1" + +rrdom@^0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/rrdom/-/rrdom-0.1.7.tgz#f2f49bfd01b59291bb7b0d981371a5e02a18e2aa" + integrity sha512-ZLd8f14z9pUy2Hk9y636cNv5Y2BMnNEY99wxzW9tD2BLDfe1xFxtLjB4q/xCBYo6HRe0wofzKzjm4JojmpBfFw== + dependencies: + rrweb-snapshot "^2.0.0-alpha.4" + +rrweb-snapshot@^2.0.0-alpha.15: + version "2.0.0-alpha.15" + resolved "https://registry.yarnpkg.com/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.15.tgz#26107a89d9b88e239107f3c8d1ef7897531e5932" + integrity sha512-U5lOYoMt6YsmxKcnWVhX1ller6B8/OICQqLWqvylajTYv5oRHgAk68AFDTAXxLsjMSTQjtr1Vp+REFSmnhWrIg== + +rrweb-snapshot@^2.0.0-alpha.4: + version "2.0.0-alpha.4" + resolved "https://registry.yarnpkg.com/rrweb-snapshot/-/rrweb-snapshot-2.0.0-alpha.4.tgz#2801bf5946177b9d685a01661a62d9d2e958f174" + integrity sha512-KQ2OtPpXO5jLYqg1OnXS/Hf+EzqnZyP5A+XPqBCjYpj3XIje/Od4gdUwjbFo3cVuWq5Cw5Y1d3/xwgIS7/XpQQ== + +rrweb@^2.0.0-alpha.4: + version "2.0.0-alpha.4" + resolved "https://registry.yarnpkg.com/rrweb/-/rrweb-2.0.0-alpha.4.tgz#3c7cf2f1bcf44f7a88dd3fad00ee8d6dd711f258" + integrity sha512-wEHUILbxDPcNwkM3m4qgPgXAiBJyqCbbOHyVoNEVBJzHszWEFYyTbrZqUdeb1EfmTRC2PsumCIkVcomJ/xcOzA== + dependencies: + "@rrweb/types" "^2.0.0-alpha.4" + "@types/css-font-loading-module" "0.0.7" + "@xstate/fsm" "^1.4.0" + base64-arraybuffer "^1.0.1" + fflate "^0.4.4" + mitt "^3.0.0" + rrdom "^0.1.7" + rrweb-snapshot "^2.0.0-alpha.4" + +safe-buffer@5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-buffer@5.2.1, 
safe-buffer@^5.0.1, safe-buffer@^5.1.2, safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +safe-stable-stringify@^2.1.0: + version "2.4.3" + resolved "https://registry.yarnpkg.com/safe-stable-stringify/-/safe-stable-stringify-2.4.3.tgz#138c84b6f6edb3db5f8ef3ef7115b8f55ccbf886" + integrity sha512-e2bDA2WJT0wxseVd4lsDP4+3ONX6HpMXQa1ZhFQ7SU+GjvORCmShbCMltrtIDfkYhVHrOcPtj+KhmDBdPdZD1g== + +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +semver@7.5.4, semver@^7.3.8, semver@^7.5.4: + version "7.5.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" + integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== + dependencies: + lru-cache "^6.0.0" + +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== + dependencies: + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "2.0.0" + mime "1.6.0" + ms "2.1.3" + on-finished "2.4.1" + range-parser "~1.2.1" + statuses "2.0.1" + +serve-static@1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity 
sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.18.0" + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + +side-channel@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" + integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== + dependencies: + call-bind "^1.0.0" + get-intrinsic "^1.0.2" + object-inspect "^1.9.0" + +signal-exit@^3.0.2: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +sonic-boom@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/sonic-boom/-/sonic-boom-3.3.0.tgz#cffab6dafee3b2bcb88d08d589394198bee1838c" + integrity sha512-LYxp34KlZ1a2Jb8ZQgFCK3niIHzibdwtwNUWKg0qQRzsDoJ3Gfgkf8KdBTFU3SkejDEIlWwnSnpVdOZIhFMl/g== + dependencies: + atomic-sleep "^1.0.0" + +sonic-boom@^2.2.1: + version "2.8.0" + resolved "https://registry.yarnpkg.com/sonic-boom/-/sonic-boom-2.8.0.tgz#c1def62a77425090e6ad7516aad8eb402e047611" + integrity sha512-kuonw1YOYYNOve5iHdSahXPOK49GqwA+LZhI6Wz/l0rP57iKyXXIHaRagOBHAPmGwJC6od2Z9zgvZ5loSgMlVg== + dependencies: + atomic-sleep "^1.0.0" + +source-map@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +split2@^4.0.0: + version 
"4.2.0" + resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4" + integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg== + +sshpk@^1.7.0: + version "1.17.0" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.17.0.tgz#578082d92d4fe612b13007496e543fa0fbcbe4c5" + integrity sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ== + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +steno@^0.4.1: + version "0.4.4" + resolved "https://registry.yarnpkg.com/steno/-/steno-0.4.4.tgz#071105bdfc286e6615c0403c27e9d7b5dcb855cb" + integrity sha512-EEHMVYHNXFHfGtgjNITnka0aHhiAlo93F7z2/Pwd+g0teG9CnM3JIINM7hVVB5/rhw9voufD7Wukwgtw2uqh6w== + dependencies: + graceful-fs "^4.1.3" + +stream-shift@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" + integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== + +string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +thread-stream@^0.15.1: + version "0.15.2" + resolved "https://registry.yarnpkg.com/thread-stream/-/thread-stream-0.15.2.tgz#fb95ad87d2f1e28f07116eb23d85aba3bc0425f4" + integrity 
sha512-UkEhKIg2pD+fjkHQKyJO3yoIvAP3N6RlNFt2dUhcS1FGvCD1cQa1M/PGknCLFIyZdtJOWQjejp7bdNqmN7zwdA== + dependencies: + real-require "^0.1.0" + +"through@>=2.2.7 <3": + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== + +toidentifier@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + +toposort@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-2.0.2.tgz#ae21768175d1559d48bef35420b2f4962f09c330" + integrity sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg== + +tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== + +tsscmp@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/tsscmp/-/tsscmp-1.0.6.tgz#85b99583ac3589ec4bfef825b5000aa911d605eb" + integrity sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA== + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + integrity 
sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w== + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== + +typanion@^3.8.0: + version "3.12.1" + resolved "https://registry.yarnpkg.com/typanion/-/typanion-3.12.1.tgz#d33deb130aba23ef6f2a3c69e7fb28148dd9089a" + integrity sha512-3SJF/czpzqq6G3lprGFLa6ps12yb1uQ1EmitNnep2fDMNh1aO/Zbq9sWY+3lem0zYb2oHJnQWyabTGUZ+L1ScQ== + +type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== + dependencies: + media-typer "0.3.0" + mime-types "~2.1.24" + +typescript@^4.8.3: + version "4.9.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a" + integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== + +uglify-js@^3.1.4: + version "3.17.4" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.17.4.tgz#61678cf5fa3f5b7eb789bb345df29afb8257c22c" + integrity sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g== + +unix-crypt-td-js@1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/unix-crypt-td-js/-/unix-crypt-td-js-1.1.4.tgz#4912dfad1c8aeb7d20fa0a39e4c31918c1d5d5dd" + integrity sha512-8rMeVYWSIyccIJscb9NdCfZKSRBKYTeVnwmiRYT2ulE3qd1RaDQ0xQDP+rI3ccIWbhu/zuo5cgN8z73belNZgw== + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity 
sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +util-deprecate@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== + +uuid@^3.3.2: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + +uuid@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5" + integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg== + +validator@13.11.0: + version "13.11.0" + resolved "https://registry.yarnpkg.com/validator/-/validator-13.11.0.tgz#23ab3fd59290c61248364eabf4067f04955fbb1b" + integrity sha512-Ii+sehpSfZy+At5nPdnyMhx78fEoPDkR2XW/zimHEL3MyGJQOCQ7WeP20jPYRz7ZCpcKLB21NxuXHF3bxjStBQ== + +validator@13.9.0: + version "13.9.0" + resolved "https://registry.yarnpkg.com/validator/-/validator-13.9.0.tgz#33e7b85b604f3bbce9bb1a05d5c3e22e1c2ff855" + integrity sha512-B+dGG8U3fdtM0/aNK4/X8CXq/EcxU2WPrPEkJGslb47qyHsxmbggTWK0yEA4qnYVNF+nxNlN88o14hIcPmSIEA== + +vary@^1, vary@~1.1.2: + version "1.1.2" + 
resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== + +verdaccio-audit@12.0.0-next.2: + version "12.0.0-next.2" + resolved "https://registry.yarnpkg.com/verdaccio-audit/-/verdaccio-audit-12.0.0-next.2.tgz#5c675f5937aafc1426018aa9a63b8f759314eabd" + integrity sha512-QP7/E7xienc2Zr7KgnSWLlOYQ/DPSD4Q+++3CaKpMwQSHxHb3HEHN9z0H946QPhbAd9q+akSNsK6bbNG3zx9Eg== + dependencies: + "@verdaccio/config" "7.0.0-next.2" + "@verdaccio/core" "7.0.0-next.2" + express "4.18.2" + https-proxy-agent "5.0.1" + node-fetch cjs + +verdaccio-htpasswd@12.0.0-next.2: + version "12.0.0-next.2" + resolved "https://registry.yarnpkg.com/verdaccio-htpasswd/-/verdaccio-htpasswd-12.0.0-next.2.tgz#2128a1794746015e87233ee3a9faa1bf5c8b00c2" + integrity sha512-vWsxp/rF1E5JAtSsjT8AdyzefhSXuyuX6jz2hRvayGgDb7QSjSi94DFtJfEB3LEr7qx6C9z3XWk122z68PtC7A== + dependencies: + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/file-locking" "12.0.0-next.0" + apache-md5 "1.1.8" + bcryptjs "2.4.3" + core-js "3.30.2" + debug "4.3.4" + http-errors "2.0.0" + unix-crypt-td-js "1.1.4" + +verdaccio@^5.26.3: + version "5.26.3" + resolved "https://registry.yarnpkg.com/verdaccio/-/verdaccio-5.26.3.tgz#03b5a21ed18256cd1edbf12cca7d015959e01cc5" + integrity sha512-zw0IUHr0tKkxuO3BrdgBNNg3FFF6zKZmDWrsrLUI821kK5ke6t2UGtpc138mpKtNANmQW5QFp+txqBdSTBuC4g== + dependencies: + "@verdaccio/config" "7.0.0-next.2" + "@verdaccio/core" "7.0.0-next.2" + "@verdaccio/local-storage" "10.3.3" + "@verdaccio/logger-7" "7.0.0-next.2" + "@verdaccio/middleware" "7.0.0-next.2" + "@verdaccio/search" "7.0.0-next.1" + "@verdaccio/signature" "7.0.0-next.0" + "@verdaccio/streams" "10.2.1" + "@verdaccio/tarball" "12.0.0-next.2" + "@verdaccio/ui-theme" "7.0.0-next.2" + "@verdaccio/url" "12.0.0-next.2" + "@verdaccio/utils" "7.0.0-next.2" + JSONStream "1.3.5" + async "3.2.4" + clipanion "3.2.1" + compression 
"1.7.4" + cookies "0.8.0" + cors "2.8.5" + debug "^4.3.4" + envinfo "7.10.0" + express "4.18.2" + express-rate-limit "5.5.1" + fast-safe-stringify "2.1.1" + handlebars "4.7.8" + js-yaml "4.1.0" + jsonwebtoken "9.0.2" + kleur "4.1.5" + lodash "4.17.21" + lru-cache "7.18.3" + mime "3.0.0" + mkdirp "1.0.4" + mv "2.1.1" + pkginfo "0.4.1" + request "2.88.2" + semver "7.5.4" + validator "13.11.0" + verdaccio-audit "12.0.0-next.2" + verdaccio-htpasswd "12.0.0-next.2" + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + integrity sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw== + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +wordwrap@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q== + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== + +yallist@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yup@0.32.11: + version "0.32.11" + resolved "https://registry.yarnpkg.com/yup/-/yup-0.32.11.tgz#d67fb83eefa4698607982e63f7ca4c5ed3cf18c5" + integrity sha512-Z2Fe1bn+eLstG8DRR6FTavGD+MeAwyfmouhHsIUgaADz8jvFKbO/fXc2trJKZg+5EBjh4gGm3iU/t3onKlXHIg== + dependencies: + "@babel/runtime" "^7.15.4" + "@types/lodash" "^4.14.175" + lodash "^4.17.21" + lodash-es "^4.17.21" + nanoclone "^0.2.1" + property-expr "^2.0.4" + toposort "^2.0.2" diff --git a/alfred/.github/workflows/semgrep.yml b/alfred/.github/workflows/semgrep.yml new file mode 100644 index 0000000..d354790 --- /dev/null +++ b/alfred/.github/workflows/semgrep.yml @@ -0,0 +1,45 @@ +name: Semgrep + +on: + # Scan changed files in PRs, block on new issues only (existing issues ignored) + pull_request: + branches: + - master + - main + - develop + - portal + + # Schedule this job to run at a certain time, using cron syntax + # Note that * is a special character in YAML so you have to quote this string + schedule: + - cron: '00 03 * * 0' # scheduled for 8.30 AM on every sunday + +jobs: + central-semgrep: + name: Static code Analysis + uses: navi-infosec/central-semgrep-action/.github/workflows/central-semgrep.yml@using-token + with: + github-event-number: ${{github.event.number}} + github-event-name: ${{github.event_name}} + github-repository: ${{github.repository}} + github-pr_owner_name: ${{github.event.pull_request.user.login}} + secrets: + READ_SEMGREP_RULES_TOKEN: ${{secrets.READ_SEMGREP_RULES_TOKEN}} + EMAIL_FETCH_TOKEN: ${{secrets.EMAIL_FETCH_TOKEN}} + + run-if-failed: + runs-on: [ self-hosted, Linux ] + needs: [central-semgrep] + if: always() && (needs.semgrep.result == 'failure') + steps: + - name: Create comment + if: ${{ ( github.event.number != '' ) }} + uses: 
navi-synced-actions/create-or-update-comment@v2 + with: + issue-number: ${{ github.event.pull_request.number }} + body: | + **Vulnerabilities have been discovered in this PR. Please check the vulnerability Analysis section of Semgrep Workflow to understand the security vulnerability. Feel free to reach out to #sast-help for more information ** + + - name: Assign Reviewers + if: ${{ ( github.event.number != '' ) }} + uses: navi-infosec/security-oncall-action@v1.1 \ No newline at end of file diff --git a/alfred/.gitignore b/alfred/.gitignore new file mode 100644 index 0000000..e091b9a --- /dev/null +++ b/alfred/.gitignore @@ -0,0 +1,26 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +/.idea +.idea +/out + +go.sum + +*.env + +*local-*.properties* +.DS_Store \ No newline at end of file diff --git a/alfred/Dockerfile b/alfred/Dockerfile new file mode 100644 index 0000000..e69de29 diff --git a/alfred/Dockerfile.collector b/alfred/Dockerfile.collector new file mode 100644 index 0000000..fd52e86 --- /dev/null +++ b/alfred/Dockerfile.collector @@ -0,0 +1,27 @@ +ARG GOLANG_TAG=193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/golang:1.23.8 + +# To run locally, use +#ARG GOLANG_TAG=registry.cmd.navi-tech.in/common/golang:1.22.12 + +FROM ${GOLANG_TAG} as builder +RUN mkdir -p /build/collector +RUN mkdir -p /root/.ssh && echo "Host *\n StrictHostKeyChecking no" > ~/.ssh/config && chmod 0400 /root/.ssh +ARG SSH_KEY +RUN echo "${SSH_KEY}" > /root/.ssh/id_rsa \ + && chmod 0600 /root/.ssh/id_rsa \ + && git config --global url."git@github.com:".insteadOf "https://github.com/" \ + && go env -w GOPRIVATE='github.com/navi-*' +WORKDIR /build/collector/ +COPY . 
/build/collector/ +RUN /bin/bash -c "make build-collector" + +FROM ${GOLANG_TAG} +RUN mkdir -p /usr/local +WORKDIR /usr/local +RUN mkdir -p alfredTmp +COPY --from=0 /build/collector/alfred-collector /usr/local/ +COPY --from=0 /build/collector/config/application-collector.properties /usr/local/config/ +COPY --from=0 /build/collector/config/elasticapm-collector.properties /usr/local/config/ +RUN adduser --system --uid 4000 --disabled-password app-user && chown -R 4000:4000 /usr/local && chmod -R g+w /usr/local/ +USER 4000 +CMD /bin/bash -c "./alfred-collector" diff --git a/alfred/Dockerfile.core b/alfred/Dockerfile.core new file mode 100644 index 0000000..be3d824 --- /dev/null +++ b/alfred/Dockerfile.core @@ -0,0 +1,30 @@ +ARG GOLANG_TAG=193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/golang:1.23.8 + +# To run locally, use +# ARG GOLANG_TAG=registry.cmd.navi-tech.in/common/golang:1.22.12 + +FROM ${GOLANG_TAG} as builder +RUN mkdir -p /build/core +RUN mkdir -p /root/.ssh && echo "Host *\n StrictHostKeyChecking no" > ~/.ssh/config && chmod 0400 /root/.ssh +ARG SSH_KEY +RUN echo "${SSH_KEY}" > /root/.ssh/id_rsa \ + && chmod 0600 /root/.ssh/id_rsa \ + && git config --global url."git@github.com:".insteadOf "https://github.com/" \ + && go env -w GOPRIVATE='github.com/navi-*' +WORKDIR /build/core/ +COPY . 
/build/core/ +RUN /bin/bash -c "make build-core" + +FROM ${GOLANG_TAG} +RUN mkdir -p /usr/local +RUN apt-get -y update +RUN apt-get install -y ffmpeg +RUN apt-get -y install webp +WORKDIR /usr/local +RUN mkdir -p alfredTmp +COPY --from=0 /build/core/alfred-core /usr/local/ +COPY --from=0 /build/core/config/application-core.properties /usr/local/config/ +COPY --from=0 /build/core/config/elasticapm-core.properties /usr/local/config/ +RUN adduser --system --uid 4000 --disabled-password app-user && chown -R 4000:4000 /usr/local && chmod -R g+w /usr/local/ +USER 4000 +CMD /bin/bash -c "./alfred-core" diff --git a/alfred/Dockerfile.ferret b/alfred/Dockerfile.ferret new file mode 100644 index 0000000..fc45bf8 --- /dev/null +++ b/alfred/Dockerfile.ferret @@ -0,0 +1,26 @@ +ARG GOLANG_TAG=193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/golang:1.23.8 + +# To run locally, use +#ARG GOLANG_TAG=registry.cmd.navi-tech.in/common/golang:1.22.12 + +FROM ${GOLANG_TAG} as builder +RUN mkdir -p /build/ferret +RUN mkdir -p /root/.ssh && echo "Host *\n StrictHostKeyChecking no" > ~/.ssh/config && chmod 0400 /root/.ssh +ARG SSH_KEY +RUN echo "${SSH_KEY}" > /root/.ssh/id_rsa \ + && chmod 0600 /root/.ssh/id_rsa \ + && git config --global url."git@github.com:".insteadOf "https://github.com/" \ + && go env -w GOPRIVATE='github.com/navi-*' +WORKDIR /build/ferret/ +COPY . 
/build/ferret/ +RUN /bin/bash -c "make build-ferret" + +FROM ${GOLANG_TAG} +RUN mkdir -p /usr/local +WORKDIR /usr/local +COPY --from=0 /build/ferret/alfred-ferret /usr/local/ +COPY --from=0 /build/ferret/config/application-ferret.properties /usr/local/config/ +COPY --from=0 /build/ferret/config/elasticapm-ferret.properties /usr/local/config/ +RUN adduser --system --uid 4000 --disabled-password app-user && chown -R 4000:4000 /usr/local && chmod -R g+w /usr/local/ +USER 4000 +CMD /bin/bash -c "./alfred-ferret" diff --git a/alfred/Dockerfile.ingester b/alfred/Dockerfile.ingester new file mode 100644 index 0000000..6638a68 --- /dev/null +++ b/alfred/Dockerfile.ingester @@ -0,0 +1,26 @@ +ARG GOLANG_TAG=193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/golang:1.23.8 + +# To run locally, use +# ARG GOLANG_TAG=registry.cmd.navi-tech.in/common/golang:1.22.12 + +FROM ${GOLANG_TAG} as builder +RUN mkdir -p /build/ingester +RUN mkdir -p /root/.ssh && echo "Host *\n StrictHostKeyChecking no" > ~/.ssh/config && chmod 0400 /root/.ssh +ARG SSH_KEY +RUN echo "${SSH_KEY}" > /root/.ssh/id_rsa \ + && chmod 0600 /root/.ssh/id_rsa \ + && git config --global url."git@github.com:".insteadOf "https://github.com/" \ + && go env -w GOPRIVATE='github.com/navi-*' +WORKDIR /build/ingester/ +COPY . 
/build/ingester/ +RUN /bin/bash -c "make build-ingester" + +FROM ${GOLANG_TAG} +RUN mkdir -p /usr/local +WORKDIR /usr/local +COPY --from=0 /build/ingester/alfred-ingester /usr/local/ +COPY --from=0 /build/ingester/config/application-ingester.properties /usr/local/config/ +COPY --from=0 /build/ingester/config/elasticapm-ingester.properties /usr/local/config/ +RUN adduser --system --uid 4000 --disabled-password app-user && chown -R 4000:4000 /usr/local && chmod -R g+w /usr/local/ +USER 4000 +CMD /bin/bash -c "./alfred-ingester" diff --git a/alfred/Makefile b/alfred/Makefile new file mode 100644 index 0000000..5a4e87f --- /dev/null +++ b/alfred/Makefile @@ -0,0 +1,47 @@ +.PHONY: build-ingester +build-ingester: + go mod tidy && CGO_ENABLED=0 go build -ldflags="-s -w" -o alfred-ingester cmd/ingester/main.go + +.PHONY: build-collector +build-collector: + go mod tidy && CGO_ENABLED=0 go build -ldflags="-s -w" -o alfred-collector cmd/collector/main.go + +.PHONY: build-core +build-core: + go mod tidy && CGO_ENABLED=0 go build -ldflags="-s -w" -o alfred-core cmd/core/main.go + +.PHONY: build-ferret +build-ferret: + go mod tidy && CGO_ENABLED=0 go build -ldflags="-s -w" -o alfred-ferret cmd/ferret/main.go + +.PHONY: build-ingester-docker-dev +build-ingester-docker-dev: build-ingester + docker build -t alfred-ingester . -f Dockerfile.ingester + +.PHONY: build-collector-docker-dev +build-collector-docker-dev: build-collector + docker build -t alfred-collector . -f Dockerfile.collector + +.PHONY: build-core-docker-dev +build-core-docker-dev: build-core + docker build alfred-core . -f Dockerfile.core + +.PHONY: build-ferret-docker-dev +build-ferret-docker-dev: build-ferret + docker build alfred-ferret . 
-f Dockerfile.ferret + +.PHONY: run-ingester-docker-dev +run-ingester-docker-dev: run-ingester + docker run alfred-ingester + +.PHONY: run-collector-docker-dev +run-collector-docker-dev: run-collector + docker run alfred-collector + +.PHONY: run-core-docker-dev +run-core-docker-dev: run-core + docker run alfred-core + +.PHONY: run-ferret-docker-dev +run-ferret-docker-dev: run-ferret + docker run alfred-ferret \ No newline at end of file diff --git a/alfred/api/.gitignore b/alfred/api/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/api/request/session_filter_request.go b/alfred/api/request/session_filter_request.go new file mode 100644 index 0000000..801a771 --- /dev/null +++ b/alfred/api/request/session_filter_request.go @@ -0,0 +1,34 @@ +package request + +type SessionFilters struct { + CustomerId string + DeviceId string + SessionId string + PhoneNumber string + StartTimestamp int64 + EndTimestamp int64 + Labels string + AppName string + ScreenName string + FragmentNames string + Vertical string + AppVersion string + ScreenTags string + CodePushVersion string + AgentEmailId string + SnapshotPerSecond string + SortBy string + AppOs string +} + +type WebSessionFilters struct { + StartTimestamp int64 + EndTimestamp int64 + AgentId string + TicketId string + SessionId string + DeviceId []string + ProjectName string + EmailId string + SortBy string +} diff --git a/alfred/api/response/app_dashboard.go b/alfred/api/response/app_dashboard.go new file mode 100644 index 0000000..edc0536 --- /dev/null +++ b/alfred/api/response/app_dashboard.go @@ -0,0 +1,69 @@ +package response + +import "alfred/model/ingester" + +type BaseAttributesDTO struct { + AppVersionCode string `json:"app_version_code,omitempty"` + AppVersionName string `json:"app_version_name,omitempty"` + DeviceId string `json:"device_id,omitempty"` + DeviceModel string `json:"device_model,omitempty"` + DeviceManufacturer string `json:"device_manufacturer,omitempty"` + ScreenResolution 
string `json:"screen_resolution,omitempty"` + AppOS string `json:"app_os,omitempty"` + OsVersion string `json:"os_version,omitempty"` + Latitude float32 `json:"latitude,omitempty"` + Longitude float32 `json:"longitude,omitempty"` + NetworkType string `json:"network_type,omitempty"` + CustomerId string `json:"customer_id,omitempty"` + UpTime int64 `json:"up_time,omitempty"` + CarrierName string `json:"carrier_name,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + SessionId string `json:"session_id,omitempty"` + ParentSessionId string `json:"parent_session_id,omitempty"` + TraceId string `json:"trace_id,omitempty"` + SessionTimeStamp int64 `json:"session_time_stamp,omitempty"` + StartTimestamp int64 `json:"start_timestamp"` + EndTimestamp int64 `json:"end_timestamp"` + SnapshotPerSecond int64 `json:"snapshot_per_second,omitempty"` + HasErrors bool `json:"has_errors"` + ImageType string `json:"image_type,omitempty"` +} + +type VideoMetadata struct { + Duration *int64 `json:"duration,omitempty"` +} + +type SearchSessionResponseData struct { + DeviceAttributes []ingester.DeviceAttributes `json:"device_attributes"` + BaseAttributesDTO BaseAttributesDTO `json:"base_attributes"` + Labels []string `json:"labels"` + Metadata VideoMetadata `json:"metadata"` + TouchCounts int64 `json:"touch_counts"` + CreatedAt int64 `json:"created_at"` +} + +type FilterData struct { + Label string `json:"label"` + Value string `json:"value"` +} + +type SelectionConfig string + +const ( + MULTI_SELECT SelectionConfig = "MULTI_SELECT" + SINGLE_SELECT SelectionConfig = "SINGLE_SELECT" + RANGE_PICKER SelectionConfig = "RANGE_PICKER" +) + +type FilterResponseData struct { + FilterName string `json:"filter_name"` + FilterKey string `json:"filter_key"` + FilterData []FilterData `json:"filter_data"` + SelectionConfig SelectionConfig `json:"selection_config"` +} + +type DropdownResponseData struct { + DropdownKey string `json:"dropdown_key"` + FilterData []FilterData `json:"data"` 
+ SelectionConfig SelectionConfig `json:"selection_config"` +} diff --git a/alfred/api/response/fetch_session.go b/alfred/api/response/fetch_session.go new file mode 100644 index 0000000..4f0c5d2 --- /dev/null +++ b/alfred/api/response/fetch_session.go @@ -0,0 +1,42 @@ +package response + +type SessionResponseData struct { + Link string `json:"link"` + SessionId string `json:"sessionId,omitempty"` + DeviceId string `json:"deviceId,omitempty"` + CustomerId string `json:"customerId,omitempty"` + PhoneNumber string `json:"phoneNumber,omitempty"` + Model string `json:"model,omitempty"` + DeviceCarrierName string `json:"deviceCarrierName,omitempty"` + AppVersionCode string `json:"appVersionCode,omitempty"` + AppVersionName string `json:"appVersionname,omitempty"` + DeviceOs string `json:"deviceOs,omitempty"` + RecordStartingTime int64 `json:"recordStartingTime,omitempty"` + Labels []string `json:"labels,omitempty"` +} + +type SessionErrorData struct { + SessionId string `json:"sessionId,omitempty"` + DeviceId string `json:"deviceId,omitempty"` + CustomerId string `json:"customerId,omitempty"` + PhoneNumber string `json:"phoneNumber,omitempty"` +} + +type VideoGenerationStatus struct { + Link string `json:"link"` + SessionId string `json:"sessionId,omitempty"` + DeviceId string `json:"deviceId,omitempty"` + CustomerId string `json:"customerId,omitempty"` + PhoneNumber string `json:"phoneNumber,omitempty"` + Model string `json:"model,omitempty"` + DeviceCarrierName string `json:"deviceCarrierName,omitempty"` + AppVersionCode string `json:"appVersionCode,omitempty"` + AppVersionName string `json:"appVersionname,omitempty"` + DeviceOs string `json:"deviceOs,omitempty"` + RecordStartingTime int64 `json:"recordStartingTime,omitempty"` + Labels []string `json:"labels,omitempty"` + FragmentsCompletedTillNow int `json:"fragmentsCompletedTillNow"` + TotalFragments int `json:"TotalFragments"` + LatestUrl string `json:"LatestUrl,omitempty"` + VideoGeneratedTillNow int 
`json:"videoGeneratedTillNow"` +} diff --git a/alfred/api/response/web_dashboard.go b/alfred/api/response/web_dashboard.go new file mode 100644 index 0000000..0422c39 --- /dev/null +++ b/alfred/api/response/web_dashboard.go @@ -0,0 +1,11 @@ +package response + +import ( + "alfred/model/ingester" +) + +type WebSessionResponseData struct { + BaseAttributesDTO ingester.WebBaseAttributes `json:"base_attributes"` + SessionAttributes ingester.WebSessionAttributes `json:"session_attribute,omitempty"` + DurationInMillis int64 `json:"duration,omitempty"` +} diff --git a/alfred/application.yml b/alfred/application.yml new file mode 100644 index 0000000..08038f5 --- /dev/null +++ b/alfred/application.yml @@ -0,0 +1,35 @@ +APP_VERSION: "0.0.1" + +APP_NAME: "alfred" +ENVIRONMENT: "DEVELOPMENT" +LOG_LEVEL: "DEBUG" +APP_PORT: 9999 +TIMEZONE: "Asia/Kolkata" +DEFAULT_LOCALE: "en" + +# Prometheus Config +PROMETHEUS_APP_NAME: "alfred" +PROMETHEUS_HOST: "localhost" +PROMETHEUS_PORT: 4001 +PROMETHEUS_ENABLED: true +PROMETHEUS_TIMEOUT: 10 +PROMETHEUS_FLUSH_INTERVAL_IN_MS: 200 +PROMETHEUS_HISTOGRAM_BUCKETS: 50.0,75.0,90.0,95.0,99.0 + +#Sentry Configs +SENTRY_ENABLED: "false" +SENTRY_DSN: "dummy" + +## Redis +REDIS_HOST: "localhost" +REDIS_PORT: 6379 +REDIS_USERNAME: "root" +REDIS_PASSWORD: "password" +REDIS_MAX_ACTIVE_CONNECTIONS: 50 +REDIS_MAX_IDLE_CONNECTIONS: 50 +REDIS_READ_TIMEOUT_MS: 500 +REDIS_CONNECTION_TIMEOUT_MS: 1000 +REDIS_DB_ID: 0 + +# Translations +TRANSLATIONS_PATH: "./i18n" diff --git a/alfred/ci.sample.yml b/alfred/ci.sample.yml new file mode 100644 index 0000000..08038f5 --- /dev/null +++ b/alfred/ci.sample.yml @@ -0,0 +1,35 @@ +APP_VERSION: "0.0.1" + +APP_NAME: "alfred" +ENVIRONMENT: "DEVELOPMENT" +LOG_LEVEL: "DEBUG" +APP_PORT: 9999 +TIMEZONE: "Asia/Kolkata" +DEFAULT_LOCALE: "en" + +# Prometheus Config +PROMETHEUS_APP_NAME: "alfred" +PROMETHEUS_HOST: "localhost" +PROMETHEUS_PORT: 4001 +PROMETHEUS_ENABLED: true +PROMETHEUS_TIMEOUT: 10 +PROMETHEUS_FLUSH_INTERVAL_IN_MS: 
200 +PROMETHEUS_HISTOGRAM_BUCKETS: 50.0,75.0,90.0,95.0,99.0 + +#Sentry Configs +SENTRY_ENABLED: "false" +SENTRY_DSN: "dummy" + +## Redis +REDIS_HOST: "localhost" +REDIS_PORT: 6379 +REDIS_USERNAME: "root" +REDIS_PASSWORD: "password" +REDIS_MAX_ACTIVE_CONNECTIONS: 50 +REDIS_MAX_IDLE_CONNECTIONS: 50 +REDIS_READ_TIMEOUT_MS: 500 +REDIS_CONNECTION_TIMEOUT_MS: 1000 +REDIS_DB_ID: 0 + +# Translations +TRANSLATIONS_PATH: "./i18n" diff --git a/alfred/cmd/collector/app/dependency/dependecies.go b/alfred/cmd/collector/app/dependency/dependecies.go new file mode 100644 index 0000000..8cb44ed --- /dev/null +++ b/alfred/cmd/collector/app/dependency/dependecies.go @@ -0,0 +1,21 @@ +package dependency + +import ( + "alfred/config" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/s3" + "alfred/repository" + "alfred/repositoryAccessLayer" +) + +type CollectorDependencies struct { +} + +func InitCollectorDependencies() { + kafkaProducer := kafka.NewKProducer(config.GetCollectorConfig().BaseConfig.Env, config.GetCollectorConfig().KafkaConfig.BaseConfig) + esConfig := config.GetCollectorConfig().ElasticSearchConfig.BaseConfig + repositories := repository.InitRepositories(esConfig) + repositoryAccessLayer := repositoryAccessLayer.InitRepositoryAccessLayer(repositories) + s3Client := s3.NewS3Client() + InitConsumer(repositoryAccessLayer, s3Client, kafkaProducer) +} diff --git a/alfred/cmd/collector/app/dependency/listeners.go b/alfred/cmd/collector/app/dependency/listeners.go new file mode 100644 index 0000000..5894373 --- /dev/null +++ b/alfred/cmd/collector/app/dependency/listeners.go @@ -0,0 +1,71 @@ +package dependency + +import ( + "alfred/cmd/collector/listener" + "alfred/config" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "context" +) + +func InitConsumer(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, kafkaProducer kafka.KProducer) { + sessionUploadListener := 
listener.NewSessionUploadListener(repositories, kafkaProducer) + eventIngestListener := listener.NewEventIngestListener(repositories, kafkaProducer) + webSessionListener := listener.NewWebSessionUploadListener(repositories, s3Client) + errorEventsIngestListener := listener.NewErrorEventsIngestListener(repositories, kafkaProducer) + + //to keep it running on separate goroutine without blocking main thread + go func() { + for { + sessionUploadListener.ConsumerGroup.Consume(context.Background(), + []string{config.GetCollectorConfig().KafkaConfig.SessionUploadTopic}, + sessionUploadListener) + } + log.Error("consumer for session upload stopped.") + }() + + // todo: in phase 2 + //go func() { + // metricUploadListener.consumerGroup.Consume(context.Background(), + // viper.GetStringSlice("kafka.alfred.mobile.metric.ingestion.topic"), + // metricUploadListener) + //}() + + go func() { + for { + eventIngestListener.ConsumerGroup.Consume(context.Background(), + []string{config.GetCollectorConfig().KafkaConfig.EventIngestionTopic}, + eventIngestListener) + } + log.Error("consumer for event ingestion stopped.") + }() + + go func() { + for { + webSessionListener.ConsumerGroup.Consume(context.Background(), + []string{config.GetCollectorConfig().KafkaConfig.WebSessionUploadTopic}, + webSessionListener) + } + log.Error("consumer for session upload stopped.") + }() + + go func() { + for { + errorEventsIngestListener.ConsumerGroup.Consume(context.Background(), + []string{config.GetCollectorConfig().KafkaConfig.ErrorEventsUploadTopic}, + errorEventsIngestListener) + } + log.Error("consumer for error-events stopped.") + }() + + // go func() { + // for { + // errorEventsUpdateListener.ConsumerGroup.Consume(context.Background(), + // []string{config.GetCollectorConfig().KafkaConfig.ErrorEventsUpdateTopic}, + // errorEventsUpdateListener) + // } + // log.Error("consumer for error-events update stopped.") + // }() +} diff --git a/alfred/cmd/collector/app/server.go 
b/alfred/cmd/collector/app/server.go new file mode 100644 index 0000000..16e7804 --- /dev/null +++ b/alfred/cmd/collector/app/server.go @@ -0,0 +1,44 @@ +package app + +import ( + "alfred/cmd/collector/app/dependency" + "alfred/config" + "alfred/internal/metrics" + "alfred/pkg/log" + "alfred/utils" + "fmt" + "github.com/gin-gonic/gin" + "go.elastic.co/apm/module/apmgin/v2" + "go.uber.org/zap" + "net/http" + "strconv" +) + +type Server struct { + gin *gin.Engine +} + +func NewServer(gin *gin.Engine) *Server { + dependency.InitCollectorDependencies() + return &Server{ + gin: gin, + } +} + +func (s *Server) Handler() { + s.gin.Use(apmgin.Middleware(s.gin)) + metrics.AdminHandler(config.GetCollectorConfig().BaseConfig.MetricPort) + s.healthCheckHandler() +} + +func (s *Server) healthCheckHandler() { + s.gin.GET(utils.PING, func(c *gin.Context) { + c.String(http.StatusOK, utils.PONG) + }) +} + +func (s *Server) Start() { + s.Handler() + log.Info("starting alfred collector server", zap.String("port", strconv.Itoa(config.GetCollectorConfig().BaseConfig.Port))) + s.gin.Run(fmt.Sprintf(":%v", config.GetCollectorConfig().BaseConfig.Port)) +} diff --git a/alfred/cmd/collector/helper/client_identifier.go b/alfred/cmd/collector/helper/client_identifier.go new file mode 100644 index 0000000..e0beb2f --- /dev/null +++ b/alfred/cmd/collector/helper/client_identifier.go @@ -0,0 +1,22 @@ +package helper + +import ( + "alfred/utils" + "github.com/Shopify/sarama" +) + +func IdentifyClientThroughHeader(headers []*sarama.RecordHeader) string { + + client := utils.NAVI_USER_APP + for _, header := range headers { + key := header.Key + encodedValue := header.Value + + if string(key) == utils.CLIENT_NAME { + client = string(encodedValue) + break + } + } + return client + +} diff --git a/alfred/cmd/collector/helper/error_events_builder.go b/alfred/cmd/collector/helper/error_events_builder.go new file mode 100644 index 0000000..2555582 --- /dev/null +++ 
b/alfred/cmd/collector/helper/error_events_builder.go @@ -0,0 +1,78 @@ +package helper + +import ( + "alfred/config" + "alfred/internal/clients" + "alfred/model/ferret" + "alfred/model/ingester" + "alfred/utils" +) + +func BuildESUploadErrorEvent(alfredEvent ingester.BaseAttributes, zipName string, client string, err error, index, eventId string, statusCode int) ferret.ErrorEventsAttributes { + return ferret.ErrorEventsAttributes{ + ferret.ErrorAttribute{ + SessionId: alfredEvent.SessionId, + ClientName: ferret.ClientName(client), + DeviceId: alfredEvent.DeviceId, + CustomerId: alfredEvent.CustomerId, + PhoneNumber: alfredEvent.PhoneNumber, + AppVersionCode: alfredEvent.AppVersionCode, + AppVersionName: alfredEvent.AppVersionName, + }, + []ferret.ErrorEvent{{ + ErrorTimestamp: utils.GetCurrentTimeInMillis(), + ZipNames: []string{zipName}, + ErrorType: ferret.ES_UPLOAD_FAILURE, + RequestURL: ferret.RequestUrl(index), + RequestMethod: ferret.POST, + ClientTs: alfredEvent.ClientTs, + ErrorName: ferret.ES_UPLOAD, + ErrorStatusCode: statusCode, + ErrorMessage: err.Error(), + IsActive: true, + SessionId: alfredEvent.SessionId, + EventIdList: []string{eventId}, + }}, + } +} + +func BuildErrorEventsSlackMessage(errorEvent ferret.ErrorEventAttribute) clients.SlackRequest { + slackMessage := make(map[string]interface{}) + slackMessage["session_id"] = errorEvent.ErrorAttribute.SessionId + slackMessage["client_name"] = errorEvent.ErrorAttribute.ClientName + slackMessage["device_id"] = errorEvent.ErrorAttribute.DeviceId + slackMessage["customer_id"] = errorEvent.ErrorAttribute.CustomerId + slackMessage["phone_number"] = errorEvent.ErrorAttribute.PhoneNumber + slackMessage["error_timestamp"] = errorEvent.ErrorEvent.ErrorTimestamp + slackMessage["zip_names"] = errorEvent.ErrorEvent.ZipNames + slackMessage["error_type"] = errorEvent.ErrorEvent.ErrorType + slackMessage["request_url"] = errorEvent.ErrorEvent.RequestURL + slackMessage["request_method"] = 
errorEvent.ErrorEvent.RequestMethod + slackMessage["client_ts"] = errorEvent.ErrorEvent.ClientTs + slackMessage["error_name"] = errorEvent.ErrorEvent.ErrorName + slackMessage["error_status_code"] = errorEvent.ErrorEvent.ErrorStatusCode + slackMessage["error_message"] = errorEvent.ErrorEvent.ErrorMessage + slackMessage["is_active"] = errorEvent.ErrorEvent.IsActive + slackMessage["network_strength_in_kbps"] = errorEvent.ErrorEvent.NetworkStrengthInKbps + slackMessage["event_session_id"] = errorEvent.ErrorEvent.SessionId + slackMessage["event_id_list"] = errorEvent.ErrorEvent.EventIdList + + return clients.SlackRequest{ + Data: slackMessage, + TemplateId: config.GetCollectorConfig().ErrorEventsSlackTemplateId, + } +} + +func ErrorEventsValidator(event ferret.ErrorEventAttribute) bool { + allowedFutureTimestamp := config.GetCollectorConfig().FutureTimestampValidationDiffInHours + allowedPastTimestamp := config.GetCollectorConfig().PastTimestampValidationDiffInHours + isValidTimeStamp := utils.ValidatePresentTime(event.ErrorEvent.ErrorTimestamp, allowedPastTimestamp, allowedFutureTimestamp) + if !isValidTimeStamp { + return false + } + if utils.Contains(config.GetCollectorConfig().IngestErrorEventsFilter, event.ErrorEvent.ErrorMessage) { + return false + } + return true + +} diff --git a/alfred/cmd/collector/listener/error_events_ingest_listener.go b/alfred/cmd/collector/listener/error_events_ingest_listener.go new file mode 100644 index 0000000..40e9d46 --- /dev/null +++ b/alfred/cmd/collector/listener/error_events_ingest_listener.go @@ -0,0 +1,118 @@ +package listener + +import ( + "alfred/cmd/collector/helper" + "alfred/config" + "alfred/internal/metrics" + "alfred/model/ferret" + "alfred/pkg/kafka" + kafka2 "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "github.com/Shopify/sarama" + "go.uber.org/zap" + "os" + "time" +) + +type ErrorEventsIngestListener interface { +} + +type 
ErrorEventsIngestListenerImpl struct { + ConsumerGroup sarama.ConsumerGroup + errorEventsAccessLayer repositoryAccessLayer.ErrorEventsAccessLayer + kafkaProducer kafka2.KProducer +} + +func NewErrorEventsIngestListener(repositories *repositoryAccessLayer.RepositoryAccessLayer, kafkaProducer kafka2.KProducer) *ErrorEventsIngestListenerImpl { + errorEventsIngestListener, err := kafka.SaramaKafkaConsumer( + config.GetCollectorConfig().BaseConfig.Env, + config.GetCollectorConfig().KafkaConfig.BaseConfig, + config.GetCollectorConfig().KafkaConfig.ErrorEventsUploadTopicGroupId, + ) + if err != nil { + log.Error("error-events ingest listener initialisation failed", zap.Error(err)) + os.Exit(1) + } + + return &ErrorEventsIngestListenerImpl{ + ConsumerGroup: errorEventsIngestListener, + errorEventsAccessLayer: repositories.ErrorEventsAccessLayer, + kafkaProducer: kafkaProducer, + } +} + +func (eil *ErrorEventsIngestListenerImpl) Setup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("setup error-events ingest kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (eil *ErrorEventsIngestListenerImpl) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("cleanup error-events ingest kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (eil *ErrorEventsIngestListenerImpl) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error { + for message := range consumerGroupClaim.Messages() { + eil.processMessage(message) + consumerGroupSession.MarkMessage(message, utils.EMPTY) + } + return nil +} + +func (eil *ErrorEventsIngestListenerImpl) processMessage(message *sarama.ConsumerMessage) { + defer func() { + recover() + }() + + var alfredErrorEvents ferret.ErrorEventsAttributes + if err := json.Unmarshal(message.Value, &alfredErrorEvents); err != nil { + 
metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc() + log.Error("json unmarshalling failed while ingesting error-events data to elasticsearch", zap.Error(err)) + } + metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc() + clientName := string(alfredErrorEvents.ErrorAttribute.ClientName) + index := config.GetCollectorConfig().ElasticSearchConfig.ErrorEventsUploadIndexClientMap[clientName] + for _, errorEvent := range alfredErrorEvents.ErrorEvents { + errorEvent.IsActive = true + event := ferret.ErrorEventAttribute{ + ErrorAttribute: alfredErrorEvents.ErrorAttribute, + ErrorEvent: errorEvent, + CreatedAt: utils.GetCurrentTimeInMillis(), + } + isValidErrorEvent := helper.ErrorEventsValidator(event) + if isValidErrorEvent { + go func() { + retryFunc := func() (interface{}, error) { + esStatusCode, err := eil.uploadErrorEventToElasticSearch(event, index) + return esStatusCode, err + } + _, err := utils.RetryFunctionWithResponseAndError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error ingesting error-events data to elasticsearch", zap.Error(err)) + errorEventsSlackMessage := helper.BuildErrorEventsSlackMessage(event) + err = eil.kafkaProducer.SendMessage(errorEventsSlackMessage, config.GetCollectorConfig().KafkaConfig.ErrorEventsSlackPushTopic, event.ErrorAttribute.DeviceId, clientName) + if err != nil { + log.Error("error publishing error-events data to kafka", zap.Error(err)) + } + return + } + }() + } + } +} + +func (eil *ErrorEventsIngestListenerImpl) uploadErrorEventToElasticSearch(errorEvent ferret.ErrorEventAttribute, index string) (int, error) { + errorEventString, err := json.Marshal(errorEvent) + if err != nil { + log.Error("error marshalling error event", zap.Error(err)) + } + esStatusCode, err := eil.errorEventsAccessLayer.UploadErrorEvents(string(errorEventString), index) + if err != nil { + 
log.Error("error ingesting error event to elasticsearch", zap.Error(err)) + return esStatusCode, err + } + return esStatusCode, nil +} diff --git a/alfred/cmd/collector/listener/error_events_update_listener.go b/alfred/cmd/collector/listener/error_events_update_listener.go new file mode 100644 index 0000000..f7da615 --- /dev/null +++ b/alfred/cmd/collector/listener/error_events_update_listener.go @@ -0,0 +1,154 @@ +package listener + +import ( + "alfred/cmd/collector/helper" + "alfred/config" + "alfred/internal/metrics" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/kafka" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "github.com/Shopify/sarama" + "go.uber.org/zap" + "os" + "time" +) + +type ErrorEventsUpdateListener struct { + ConsumerGroup sarama.ConsumerGroup + errorEventsAccessLayer repositoryAccessLayer.ErrorEventsAccessLayer + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer +} + +func NewErrorEventsUpdateListener(repositories *repositoryAccessLayer.RepositoryAccessLayer) *ErrorEventsUpdateListener { + errorEventsUpdateListener, err := kafka.SaramaKafkaConsumer( + config.GetCollectorConfig().BaseConfig.Env, + config.GetCollectorConfig().KafkaConfig.BaseConfig, + config.GetCollectorConfig().KafkaConfig.ErrorEventsUpdateTopicGroupId, + ) + if err != nil { + log.Error("error events update listener initialisation failed", zap.Error(err)) + os.Exit(1) + } + + return &ErrorEventsUpdateListener{ + ConsumerGroup: errorEventsUpdateListener, + errorEventsAccessLayer: repositories.ErrorEventsAccessLayer, + sessionsAccessLayer: repositories.SessionsAccessLayer, + } +} + +func (eul *ErrorEventsUpdateListener) Setup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("setup error events update kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (eul *ErrorEventsUpdateListener) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error { + 
log.Info("cleanup error events update kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (eul *ErrorEventsUpdateListener) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error { + for message := range consumerGroupClaim.Messages() { + eul.processMessage(message) + consumerGroupSession.MarkMessage(message, "") + } + return nil +} + +func (eul *ErrorEventsUpdateListener) processMessage(message *sarama.ConsumerMessage) { + defer func() { + recover() + }() + client := helper.IdentifyClientThroughHeader(message.Headers) + var alfredSessionRecordingEvent ingester.SessionUploadRequest + err := json.Unmarshal(message.Value, &alfredSessionRecordingEvent) + if err != nil { + metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc() + log.Error("error unmarshalling session upload event in error event update listener", zap.Error(err)) + return + } + metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc() + retryFunc := func() error { + err := eul.fetchErrorEventsForSession(alfredSessionRecordingEvent, client) + return err + } + err = utils.RetryFunctionWithError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error handling and removing error events for session", zap.String("session_id", alfredSessionRecordingEvent.BaseAttributes.SessionId), zap.String("zipName", alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId), zap.Error(err)) + } +} + +func (eul *ErrorEventsUpdateListener) fetchErrorEventsForSession(alfredSessionRecordingEvent ingester.SessionUploadRequest, client string) error { + sessionUploadIndex := config.GetCollectorConfig().ElasticSearchConfig.AppSessionUploadIndexClientMap[client] + errorEventsUploadIndex := config.GetCollectorConfig().ElasticSearchConfig.ErrorEventsUploadIndexClientMap[client] 
+ sessionErrorEventsFilter := config.GetCollectorConfig().SessionErrorEventsFilter + keyValueMap := make(map[string][]string) + keyValueMap["error_attributes.session_id"] = []string{alfredSessionRecordingEvent.BaseAttributes.SessionId} + keyValueMap["error_event.error_name"] = sessionErrorEventsFilter + var sessionErrorEvents []es.ErrorEventsResponse + var err error + retryFunc := func() error { + sessionErrorEvents, err = eul.errorEventsAccessLayer.FetchSessionErrorEventsWithKeyValue(keyValueMap, errorEventsUploadIndex) + return err + } + err = utils.RetryFunctionWithError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error fetching session error events from session id", zap.String("session_id", alfredSessionRecordingEvent.BaseAttributes.SessionId), zap.String("zipName", alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId), zap.Error(err)) + return err + } + + if len(sessionErrorEvents) == 0 { + return nil + } + keyValueMap["error_event.zip_name"] = []string{alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId} + var sessionErrorEventsWithZipName []es.ErrorEventsResponse + retryFunc = func() error { + sessionErrorEventsWithZipName, err = eul.errorEventsAccessLayer.FetchSessionErrorEventsWithKeyValue(keyValueMap, errorEventsUploadIndex) + return err + } + err = utils.RetryFunctionWithError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error fetching session error events from session id with zip", zap.String("session_id", alfredSessionRecordingEvent.BaseAttributes.SessionId), zap.String("zipName", alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId), zap.Error(err)) + return err + } + + if len(sessionErrorEventsWithZipName) == 0 { + return nil + } + var errorEventsdocIdList []string + var errorEventsindexList []string + for _, 
sessionErrorEvent := range sessionErrorEventsWithZipName { + errorEventsdocIdList = append(errorEventsdocIdList, sessionErrorEvent.DocId) + errorEventsindexList = append(errorEventsindexList, sessionErrorEvent.Index) + } + return eul.updateErrorEventsForSession(errorEventsdocIdList, errorEventsindexList, sessionErrorEvents, sessionErrorEventsWithZipName, alfredSessionRecordingEvent, sessionUploadIndex) +} + +func (eul *ErrorEventsUpdateListener) updateErrorEventsForSession(errorEventsdocIdList []string, errorEventsindexList []string, sessionErrorEvents []es.ErrorEventsResponse, sessionErrorEventsWithZipName []es.ErrorEventsResponse, alfredSessionRecordingEvent ingester.SessionUploadRequest, sessionUploadIndex string) error { + var err error + retryFunc := func() error { + err = eul.errorEventsAccessLayer.UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList) + return err + } + err = utils.RetryFunctionWithError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error removing error events from session id", zap.String("session_id", alfredSessionRecordingEvent.BaseAttributes.SessionId), zap.String("zipName", alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId), zap.Error(err)) + return err + } + if len(sessionErrorEvents) == len(sessionErrorEventsWithZipName) { + retryFunc = func() error { + err = eul.sessionsAccessLayer.UpdateSessionErrorEventsWithSessionId([]string{alfredSessionRecordingEvent.BaseAttributes.SessionId}, []string{sessionUploadIndex + "*"}, false) + return err + } + err = utils.RetryFunctionWithError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error updating session for no error events", zap.String("session_id", alfredSessionRecordingEvent.BaseAttributes.SessionId), zap.String("zipName", 
alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId), zap.Error(err)) + return err + } + } + return nil +} diff --git a/alfred/cmd/collector/listener/event_ingest_listener.go b/alfred/cmd/collector/listener/event_ingest_listener.go new file mode 100644 index 0000000..12e5f58 --- /dev/null +++ b/alfred/cmd/collector/listener/event_ingest_listener.go @@ -0,0 +1,174 @@ +package listener + +import ( + "alfred/cmd/collector/helper" + "alfred/config" + "alfred/internal/metrics" + "alfred/model/ingester" + "alfred/pkg/cache" + "alfred/pkg/kafka" + kafka2 "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "github.com/Shopify/sarama" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "os" + "time" +) + +type EventIngestListener struct { + ConsumerGroup sarama.ConsumerGroup + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + appFragmentAccessLayer repositoryAccessLayer.AppFragmentsAccessLayer + cacheClientForFragments cache.ConfigClientInterface + kafkaProducer kafka2.KProducer + goroutineGroup *errgroup.Group +} + +type appEvent struct { + BaseAttributes ingester.BaseAttributes `json:"base_attributes,omitempty"` + MetricsAttributes ingester.EventAttributes `json:"events,omitempty"` + CreatedAt int64 `json:"created_at,omitempty"` +} + +func NewEventIngestListener(repositories *repositoryAccessLayer.RepositoryAccessLayer, kafkaProducer kafka2.KProducer) *EventIngestListener { + eventUploadListener, err := kafka.SaramaKafkaConsumer( + config.GetCollectorConfig().BaseConfig.Env, + config.GetCollectorConfig().KafkaConfig.BaseConfig, + config.GetCollectorConfig().KafkaConfig.EventIngestionTopicGroupId) + if err != nil { + log.Error("event ingest listener initialisation failed", zap.Error(err)) + os.Exit(1) + } + + group := new(errgroup.Group) + group.SetLimit(config.GetCollectorConfig().EventListenerGoroutineGroupLimit) + + return &EventIngestListener{ + ConsumerGroup: eventUploadListener, + 
eventsAccessLayer: repositories.EventsAccessLayer, + appFragmentAccessLayer: repositories.AppFragmentsAccessLayer, + cacheClientForFragments: cache.NewCacheConfig(), + kafkaProducer: kafkaProducer, + goroutineGroup: group, + } +} + +func (ks *EventIngestListener) Setup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("setup app event ingest kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *EventIngestListener) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("cleanup app event ingest kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *EventIngestListener) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error { + for message := range consumerGroupClaim.Messages() { + ks.processEvent(message) + consumerGroupSession.MarkMessage(message, "") + } + return nil +} + +func (ks *EventIngestListener) processEvent(message *sarama.ConsumerMessage) { + defer func() { + recover() + }() + + client := helper.IdentifyClientThroughHeader(message.Headers) + index := config.GetCollectorConfig().ElasticSearchConfig.AppEventIngestionIndexClientMap[client] + + var appEvents ingester.AppEvent + if err := json.Unmarshal(message.Value, &appEvents); err != nil { + metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc() + log.Error("json unmarshalling failed for app event", zap.Error(err)) + } + metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc() + for _, event := range appEvents.Events { + appEvents.BaseAttributes.SessionId = event.SessionId + appEvent := appEvent{ + appEvents.BaseAttributes, + event, + utils.GetCurrentTimeInMillis(), + } + + if utils.Contains(config.GetCollectorConfig().IgnoredEventTypes, string(appEvent.MetricsAttributes.EventType)) { + continue + } + + ks.goroutineGroup.Go(func() error { + retryFunc 
:= func() (interface{}, error) { + esStatusCode, err := ks.uploadEventToElasticSearch(appEvent, index) + return esStatusCode, err + } + + _, err := utils.RetryFunctionWithResponseAndError(retryFunc, config.GetCollectorConfig().MaxRetry, config.GetCollectorConfig().InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error ingesting event ingestion data to elasticsearch", + zap.String("session id", event.SessionId), zap.Error(err)) + // err = ks.kafkaProducer.SendMessage(errorEventAttributes, config.GetCollectorConfig().KafkaConfig.ErrorEventsUploadTopic, errorEventAttributes.ErrorAttribute.DeviceId, client) + // if err != nil { + // log.Error("error publishing event ingestion data to error-events kafka topic", + // zap.String("session id", event.SessionId), zap.Error(err)) + // } + return nil + } + + return nil + }) + + } + /*if client == utils.NAVI_USER_APP { + for _, event := range appEvents.Events { + event := event + go func() { + ks.uploadFragmentsToElasticSearch(event.FragmentList, event.ScreenName, event.ModuleName, config.GetCollectorConfig().ElasticSearchConfig.FragmentIngestionIndex) + }() + } + }*/ +} + +func (ks *EventIngestListener) uploadEventToElasticSearch(appEvent appEvent, index string) (int, error) { + appEventString, err := json.Marshal(appEvent) + if err != nil { + log.Error("error in serializing app events", zap.Error(err)) + return 0, err + } + esStatusCode, err := ks.eventsAccessLayer.CreateEventIngester(string(appEventString), index) + if err != nil { + log.Error("error ingesting app events data to elasticsearch", zap.Error(err)) + return esStatusCode, err + } + return esStatusCode, nil +} + +func (ks *EventIngestListener) uploadFragmentsToElasticSearch(fragmentList []string, screenName, vertical string, index string) { + for _, fragmentName := range fragmentList { + _, found := ks.cacheClientForFragments.Get(fragmentName) + if !found { + fragmentModel := &ingester.FragmentModel{ + FragmentAttributes: 
ingester.FragmentAttributes{ + FragmentName: fragmentName, + ScreenName: screenName, + Vertical: vertical, + }, + } + fragment, err := json.Marshal(fragmentModel) + if err != nil { + log.Error("error in serializing app fragments", zap.Error(err)) + return + } + err = ks.appFragmentAccessLayer.CreateFragment(*fragmentModel, string(fragment), index) + if err != nil { + log.Error("error ingesting app fragments data to elasticsearch", zap.Error(err)) + return + } + ks.cacheClientForFragments.PutWithTtl(fragmentName, screenName, config.GetCollectorConfig().CacheTimeForFragmentsIngestion) + } + } +} diff --git a/alfred/cmd/collector/listener/event_ingest_listener_test.go b/alfred/cmd/collector/listener/event_ingest_listener_test.go new file mode 100644 index 0000000..00cd901 --- /dev/null +++ b/alfred/cmd/collector/listener/event_ingest_listener_test.go @@ -0,0 +1,160 @@ +package listener + +import ( + "alfred/mocks" + "alfred/pkg/log" + "errors" + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" +) + +func TestEventIngestListener_Setup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &EventIngestListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Setup(consumerGroupSessionMock) + + consumerGroupSessionMock.AssertExpectations(t) + assert.Nil(t, err, "err should be nil") + +} + +func TestEventIngestListener_Cleanup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &EventIngestListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Cleanup(consumerGroupSessionMock) + + consumerGroupSessionMock.AssertExpectations(t) + assert.Nil(t, err, "err should be nil") + +} + +func TestEventIngestListener_ConsumeClaimWithoutEvents(t *testing.T) { + + log.InitLogger() + 
elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + cacheClientMock := &mocks.MockCacheClient{} + + messagesChan := make(chan *sarama.ConsumerMessage, 1) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1) + mockMessage1 := &sarama.ConsumerMessage{ + Value: []byte("Test Message 1"), + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &EventIngestListener{consumerGroupMock, elasticSearchClientMock, cacheClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) +} + +func TestEventIngestListener_ConsumeClaimWithEvents_Success(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + cacheClientMock := &mocks.MockCacheClient{} + + messagesChan := make(chan *sarama.ConsumerMessage, 1) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1) + mockMessage1 := &sarama.ConsumerMessage{ + Key: []byte("Key1"), + Value: []byte("{\n \"base_attributes\": {\n \"app_version_code\": \"83\",\n \"app_version_name\": \"2.3.10\",\n \"client_ts\": 1695293792528,\n \"device_id\": \"c9f33448e4a538af\",\n \"device_model\": 
\"sdk_gphone_arm64\",\n \"device_manufacturer\": \"Google\",\n \"app_os\": \"Android\",\n \"os_version\": \"30\",\n \"customer_id\": \"92226097-2c0b-4b19-940d-b3432953c9dc\",\n \"carrier_name\": \"Android\",\n \"metadata\": {\n \"agent_email_id\": \"girish.s@navi.com\",\n \"code_push_version\": \"2.3.10\",\n \"phone_number\": \"8757641020\"\n },\n \"session_time_stamp\": 1695293792528,\n \"event_timestamp\": 1695293827257,\n \"session_id\": \"29d67a40-3141-4660-a33e-4e7e27e333b7ALFRED_SESSION_ID\"\n },\n \"events\": [\n {\n \"session_id\": \"29d67a40-3141-4660-a33e-4e7e27e333b7ALFRED_SESSION_ID\",\n \"screen_name\": \"Cosmos\",\n \"module_name\": \"Cosmos\",\n \"event_name\": \"SCROLL_EVENT\",\n \"event_timestamp\": 1695293800233,\n \"attributes\": {\n \"END_X\": \"182.98828\",\n \"END_Y\": \"2100.9155\",\n \"START_X\": \"182.98828\",\n \"START_Y\": \"2100.9155\"\n },\n \"event_type\": \"SCROLL_EVENT\"\n }\n ]\n}"), + Topic: "Topic1", + Partition: 0, + Offset: 100, + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &EventIngestListener{consumerGroupMock, elasticSearchClientMock, cacheClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + elasticSearchClientMock.On("CreateEventIngester", mock.Anything, mock.Anything).Return(nil) + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + +} + +func TestEventIngestListener_ConsumeClaimWithEvents_Failure(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + 
consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + cacheClientMock := &mocks.MockCacheClient{} + expectedError := errors.New("mocked error") + + messagesChan := make(chan *sarama.ConsumerMessage, 1) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1) + mockMessage1 := &sarama.ConsumerMessage{ + Key: []byte("Key1"), + Value: []byte("{\n \"base_attributes\": {\n \"app_version_code\": \"83\",\n \"app_version_name\": \"2.3.10\",\n \"client_ts\": 1695293792528,\n \"device_id\": \"c9f33448e4a538af\",\n \"device_model\": \"sdk_gphone_arm64\",\n \"device_manufacturer\": \"Google\",\n \"app_os\": \"Android\",\n \"os_version\": \"30\",\n \"customer_id\": \"92226097-2c0b-4b19-940d-b3432953c9dc\",\n \"carrier_name\": \"Android\",\n \"metadata\": {\n \"agent_email_id\": \"girish.s@navi.com\",\n \"code_push_version\": \"2.3.10\",\n \"phone_number\": \"8757641020\"\n },\n \"session_time_stamp\": 1695293792528,\n \"event_timestamp\": 1695293827257,\n \"session_id\": \"29d67a40-3141-4660-a33e-4e7e27e333b7ALFRED_SESSION_ID\"\n },\n \"events\": [\n {\n \"session_id\": \"29d67a40-3141-4660-a33e-4e7e27e333b7ALFRED_SESSION_ID\",\n \"screen_name\": \"Cosmos\",\n \"module_name\": \"Cosmos\",\n \"event_name\": \"SCROLL_EVENT\",\n \"event_timestamp\": 1695293800233,\n \"attributes\": {\n \"END_X\": \"182.98828\",\n \"END_Y\": \"2100.9155\",\n \"START_X\": \"182.98828\",\n \"START_Y\": \"2100.9155\"\n },\n \"event_type\": \"SCROLL_EVENT\"\n }\n ]\n}"), + Topic: "Topic1", + Partition: 0, + Offset: 100, + } + messagesChan <- mockMessage1 + close(messagesChan) + + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &EventIngestListener{consumerGroupMock, elasticSearchClientMock, cacheClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + 
consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + elasticSearchClientMock.On("CreateEventIngester", mock.Anything, mock.Anything).Return(expectedError) + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + +} diff --git a/alfred/cmd/collector/listener/metric_upload_listener.go b/alfred/cmd/collector/listener/metric_upload_listener.go new file mode 100644 index 0000000..5f7f7f9 --- /dev/null +++ b/alfred/cmd/collector/listener/metric_upload_listener.go @@ -0,0 +1,87 @@ +package listener + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/model/ingester" + "alfred/pkg/kafka" + "alfred/pkg/log" + "encoding/json" + "github.com/Shopify/sarama" + "go.uber.org/zap" + "os" +) + +type MetricUploadListener struct { + consumerGroup sarama.ConsumerGroup + appMetricPublisher metrics.AppMetricsPublisher +} + +type performanceMetrics struct { + BaseAttributes ingester.BaseAttributes `json:"base_attributes,omitempty"` + MetricsAttributes ingester.MetricsAttributes `json:"metrics_attributes,omitempty"` +} + +func NewMetricUploadListener() *MetricUploadListener { + sessionUploadListener, err := kafka.SaramaKafkaConsumer( + config.GetCollectorConfig().BaseConfig.Env, + config.GetCollectorConfig().KafkaConfig.BaseConfig, + config.GetCollectorConfig().KafkaConfig.MetricIngestionTopicGroupId) + if err != nil { + log.Error("metrics upload listener initialisation failed", zap.Error(err)) + os.Exit(1) + } + + return &MetricUploadListener{ + consumerGroup: sessionUploadListener, + appMetricPublisher: metrics.NewAppMetricPublisher(), + } +} + +func (ks *MetricUploadListener) Setup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("setup metric upload kafka listener", 
zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *MetricUploadListener) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("cleanup metric upload kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *MetricUploadListener) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error { + for message := range consumerGroupClaim.Messages() { + ks.processMessage(message) + consumerGroupSession.MarkMessage(message, "") + } + return nil +} + +func (ks *MetricUploadListener) processMessage(message *sarama.ConsumerMessage) { + defer func() { + recover() + }() + + var appPerformanceMetrics ingester.AppMetrics + if err := json.Unmarshal(message.Value, &appPerformanceMetrics); err != nil { + metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc() + log.Error("json unmarshalling failed for app performance metrics event", zap.Error(err)) + } + metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc() + + for i := 0; i < len(appPerformanceMetrics.MetricsAttributes); i++ { + performanceMetric := performanceMetrics{ + appPerformanceMetrics.BaseAttributes, + appPerformanceMetrics.MetricsAttributes[i], + } + ks.uploadMetricsToPrometheus(performanceMetric) + } + +} + +func (ks *MetricUploadListener) uploadMetricsToPrometheus(performanceMetrics performanceMetrics) { + ks.appMetricPublisher.PublishMetrics( + performanceMetrics.MetricsAttributes.Attributes, + performanceMetrics.MetricsAttributes.EventType, + performanceMetrics.BaseAttributes, + ) +} diff --git a/alfred/cmd/collector/listener/metric_upload_listener_test.go b/alfred/cmd/collector/listener/metric_upload_listener_test.go new file mode 100644 index 0000000..d14035a --- /dev/null +++ b/alfred/cmd/collector/listener/metric_upload_listener_test.go @@ -0,0 +1,118 @@ +package listener + +import ( + 
"alfred/mocks" + "alfred/pkg/log" + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" +) + +func TestMetricUploadListener_Setup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &MetricUploadListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Setup(consumerGroupSessionMock) + + consumerGroupSessionMock.AssertExpectations(t) + assert.Nil(t, err, "err should be nil") + +} + +func TestMetricUploadListener_Cleanup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &MetricUploadListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Cleanup(consumerGroupSessionMock) + + consumerGroupSessionMock.AssertExpectations(t) + assert.Nil(t, err, "err should be nil") + +} + +func TestMetricUploadListener_ConsumeClaimWithoutMetrics(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + appMetricPublisherMock := &mocks.MockAppMetricsPublisher{} + + messagesChan := make(chan *sarama.ConsumerMessage, 1) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1) + mockMessage1 := &sarama.ConsumerMessage{ + Value: []byte("Test Message 1"), + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &MetricUploadListener{consumerGroupMock, elasticSearchClientMock, appMetricPublisherMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan 
*sarama.ConsumerMessage)(messagesBufferedChan)).Once() + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + appMetricPublisherMock.AssertExpectations(t) +} + +func TestMetricUploadListener_ConsumeClaimWithMetrics(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + appMetricPublisherMock := &mocks.MockAppMetricsPublisher{} + + messagesChan := make(chan *sarama.ConsumerMessage, 1) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1) + mockMessage1 := &sarama.ConsumerMessage{ + Key: []byte("Key1"), + Value: []byte("{\n \"base_attributes\": {\n \"app_version_code\": \"294\",\n \"app_version_name\": \"3.3.1-debug\",\n \"device_id\": \"a60f2186f3bfb31f\",\n \"device_model\": \"LE2101\",\n \"device_manufacturer\": \"OnePlus\",\n \"app_os\": \"Android\",\n \"os_version\": \"31\",\n \"latitude\": 20.34,\n \"longitude\": 18.23,\n \"customer_id\": \"b2dbbab6-5b82-4a4f-8afb-57490d3ce1bb\",\n \"carrier_name\": \"JIO\"\n },\n \"metrics_attributes\": [\n {\n \"event_id\": \"qwe\",\n \"event_name\": \"41dsfa\",\n \"event_timestamp\": 1676371697152,\n \"session_id\": \"41dsfa\",\n \"attributes\": {\n \"bytes_received\": 200,\n \"bytes_sent\": 200,\n \"duration_in_ms\": 3,\n \"end_time\": 1676371697155,\n \"error_message\": \"qwe\",\n \"error_type\": \"qwe\",\n \"method\": \"qwe\",\n \"response_code\": 200,\n \"start_time\": 1676371697152,\n \"url\": \"qwe\"\n },\n \"event_type\": \"API_METRICS\"\n }\n ]\n}"), + Topic: "Topic1", + Partition: 0, + Offset: 100, + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + 
for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &MetricUploadListener{consumerGroupMock, elasticSearchClientMock, appMetricPublisherMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + appMetricPublisherMock.On("PublishMetrics", mock.Anything, mock.Anything, mock.Anything).Once() + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + appMetricPublisherMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + +} diff --git a/alfred/cmd/collector/listener/session_upload_listener.go b/alfred/cmd/collector/listener/session_upload_listener.go new file mode 100644 index 0000000..0371651 --- /dev/null +++ b/alfred/cmd/collector/listener/session_upload_listener.go @@ -0,0 +1,141 @@ +package listener + +import ( + "alfred/cmd/collector/helper" + "alfred/config" + "alfred/internal/metrics" + "alfred/model/ingester" + "alfred/pkg/kafka" + kafka2 "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "github.com/Shopify/sarama" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "os" + "time" +) + +type SessionUploadListener struct { + ConsumerGroup sarama.ConsumerGroup + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + errorEventsAccessLayer repositoryAccessLayer.ErrorEventsAccessLayer + KafkaProducer kafka2.KProducer + goroutineGroup *errgroup.Group +} + +func NewSessionUploadListener(repositories *repositoryAccessLayer.RepositoryAccessLayer, kafkaProducer kafka2.KProducer) *SessionUploadListener { + sessionUploadListener, err := kafka.SaramaKafkaConsumer( + 
config.GetCollectorConfig().BaseConfig.Env, + config.GetCollectorConfig().KafkaConfig.BaseConfig, + config.GetCollectorConfig().KafkaConfig.SessionUploadTopicGroupId, + ) + if err != nil { + log.Error("session upload listener initialisation failed", zap.Error(err)) + os.Exit(1) + } + + group := new(errgroup.Group) + group.SetLimit(config.GetCollectorConfig().SessionUploadListenerGoroutineGroupLimit) + + return &SessionUploadListener{ + ConsumerGroup: sessionUploadListener, + sessionsAccessLayer: repositories.SessionsAccessLayer, + errorEventsAccessLayer: repositories.ErrorEventsAccessLayer, + KafkaProducer: kafkaProducer, + goroutineGroup: group, + } +} + +func (ks *SessionUploadListener) Setup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("setup session upload kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *SessionUploadListener) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error { + log.Info("cleanup session upload kafka listener", zap.String("memberId", consumerGroupSession.MemberID())) + return nil +} + +func (ks *SessionUploadListener) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error { + for message := range consumerGroupClaim.Messages() { + ks.processMessage(message) + consumerGroupSession.MarkMessage(message, "") + } + return nil +} + +func (ks *SessionUploadListener) processMessage(message *sarama.ConsumerMessage) { + defer func() { + if r := recover(); r != nil { + log.Error("panic recovered in session upload listener", zap.Any("recover", r)) + } + }() + + // One goroutine per message, bounded by errgroup limit + ks.goroutineGroup.Go(func() error { + cfg := config.GetCollectorConfig() + + client := helper.IdentifyClientThroughHeader(message.Headers) + sessionUploadIndex := cfg.ElasticSearchConfig.AppSessionUploadIndexClientMap[client] + + var uploadReq ingester.SessionUploadRequest + if err := 
json.Unmarshal(message.Value, &uploadReq); err != nil { + metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc() + log.Error("json unmarshalling failed while ingesting session upload data to elasticsearch", zap.Error(err)) + return nil + } + + uploadReq.CreatedAt = utils.GetCurrentTimeInMillis() + metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc() + + // Retry ElasticSearch ingestion + retryFn := func() (interface{}, error) { + return ks.sessionsAccessLayer.UploadSession(uploadReq, sessionUploadIndex) + } + + _, err := utils.RetryFunctionWithResponseAndError(retryFn, cfg.MaxRetry, cfg.InitialDelayInSeconds*time.Second) + if err != nil { + log.Error("error ingesting session upload data to elasticsearch", zap.Error(err)) + // errorAttrs := helper.BuildESUploadErrorEvent( + // uploadReq.BaseAttributes, + // uploadReq.SessionUploadEventAttributes.EventId, + // client, + // err, + // sessionUploadIndex, + // utils.EMPTY, + // response.(int), + // ) + // if err := ks.KafkaProducer.SendMessage( + // errorAttrs, + // cfg.KafkaConfig.ErrorEventsUploadTopic, + // errorAttrs.ErrorAttribute.DeviceId, + // client, + // ); err != nil { + // log.Error("error publishing session upload data to error-events kafka topic", zap.Error(err)) + // } + return nil + } + + // if response == http.StatusCreated { + // // Publish to update topic synchronously to avoid nested goroutine and data races + // updateRetryFn := func() error { + // return ks.KafkaProducer.SendMessage( + // uploadReq, + // cfg.KafkaConfig.ErrorEventsUpdateTopic, + // uploadReq.BaseAttributes.DeviceId, + // client, + // ) + // } + // if err := utils.RetryFunctionWithError(updateRetryFn, cfg.MaxRetry, cfg.InitialDelayInSeconds*time.Second); err != nil { + // log.Error("error publishing session upload data to error-events-update kafka topic", + // zap.String("session_id", uploadReq.BaseAttributes.SessionId), + // zap.Error(err)) + // } + // } + + return nil 
+ }) +} diff --git a/alfred/cmd/collector/listener/session_upload_listener_test.go b/alfred/cmd/collector/listener/session_upload_listener_test.go new file mode 100644 index 0000000..4082d77 --- /dev/null +++ b/alfred/cmd/collector/listener/session_upload_listener_test.go @@ -0,0 +1,153 @@ +package listener + +import ( + "alfred/mocks" + "alfred/pkg/log" + "errors" + "github.com/Shopify/sarama" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" +) + +func TestSessionUploadListenerr_Setup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &SessionUploadListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Setup(consumerGroupSessionMock) + assert.Nil(t, err, "err should be nil") + consumerGroupSessionMock.AssertExpectations(t) + +} + +func TestSessionUploadListener_Cleanup(t *testing.T) { + + log.InitLogger() + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + testMemberID := "testMemberID" + + ks := &SessionUploadListener{} + consumerGroupSessionMock.On("MemberID").Return(testMemberID) + err := ks.Cleanup(consumerGroupSessionMock) + assert.Nil(t, err, "err should be nil") + consumerGroupSessionMock.AssertExpectations(t) + +} + +func TestSessionUploadListener_ConsumeClaimWithoutEvents(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + + messagesChan := make(chan *sarama.ConsumerMessage, 2) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 2) + mockMessage1 := &sarama.ConsumerMessage{ + Value: []byte("Test Message 1"), + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + 
close(messagesBufferedChan) + }() + + ks := &SessionUploadListener{consumerGroupMock, elasticSearchClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) +} + +func TestSessionUploadListener_ConsumeClaimWithEvents_Success(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + + messagesChan := make(chan *sarama.ConsumerMessage, 2) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 2) + mockMessage1 := &sarama.ConsumerMessage{ + Key: []byte("Key1"), + Value: []byte("\"{\\n \\\"base_attributes\\\" : {\\n \\\"app_version_code\\\" : \\\"332\\\",\\n \\\"app_version_name\\\" : \\\"3.4.11-debug\\\",\\n \\\"client_ts\\\" : 1690881905125,\\n \\\"device_id\\\" : \\\"d514d237b3ff5d98\\\",\\n \\\"device_model\\\" : \\\"sdk_gphone_arm64\\\",\\n \\\"device_manufacturer\\\" : \\\"Google\\\",\\n \\\"app_os\\\" : \\\"Android\\\",\\n \\\"os_version\\\" : \\\"30\\\",\\n \\\"latitude\\\" : 37.421997,\\n \\\"longitude\\\" : -122.084,\\n \\\"customer_id\\\" : \\\"d514d237b3ff5d98\\\",\\n \\\"carrier_name\\\" : \\\"Android\\\",\\n \\\"session_time_stamp\\\" : 1690529859742,\\n \\\"event_timestamp\\\" : 1690881966898,\\n \\\"session_id\\\" : \\\"0912105e-a4e3-46e6-9109-817c14c1c544ALFRED_SESSION_ID\\\"\\n },\\n \\\"session_upload_event_attributes\\\" : {\\n \\\"beginning_device_attributes\\\" : {\\n \\\"battery\\\" : 100,\\n \\\"storage\\\" : 
797028350\\n },\\n \\\"end_device_attributes\\\" : {\\n \\\"battery\\\" : 100,\\n \\\"storage\\\" : 797016060,\\n \\\"memory\\\" : 48.128532\\n },\\n \\\"event_id\\\" : \\\"e238f0ac-c83e-4ee2-bd42-8a4e205b4d28ALFRED_EVENT_ID\\\"\\n }\\n }\""), + Topic: "Topic1", + Partition: 0, + Offset: 100, + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + ks := &SessionUploadListener{consumerGroupMock, elasticSearchClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + elasticSearchClientMock.On("UploadSession", mock.Anything, mock.Anything, mock.Anything).Return(nil) + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + +} + +func TestSessionUploadListener_ConsumeClaimWithEvents_Failure(t *testing.T) { + + log.InitLogger() + elasticSearchClientMock := &mocks.MockElasticSearchClient{} + consumerGroupSessionMock := &mocks.ConsumerGroupSession{} + consumerGroupClaimMock := &mocks.ConsumerGroupClaim{} + consumerGroupMock := &mocks.MockConsumerGroup{} + expectedError := errors.New("mocked error") + + messagesChan := make(chan *sarama.ConsumerMessage, 2) + messagesBufferedChan := make(chan *sarama.ConsumerMessage, 2) + mockMessage1 := &sarama.ConsumerMessage{ + Key: []byte("Key1"), + Value: []byte("\"{\\n \\\"base_attributes\\\" : {\\n \\\"app_version_code\\\" : \\\"332\\\",\\n \\\"app_version_name\\\" : \\\"3.4.11-debug\\\",\\n \\\"client_ts\\\" : 1690881905125,\\n \\\"device_id\\\" : \\\"d514d237b3ff5d98\\\",\\n \\\"device_model\\\" : \\\"sdk_gphone_arm64\\\",\\n 
\\\"device_manufacturer\\\" : \\\"Google\\\",\\n \\\"app_os\\\" : \\\"Android\\\",\\n \\\"os_version\\\" : \\\"30\\\",\\n \\\"latitude\\\" : 37.421997,\\n \\\"longitude\\\" : -122.084,\\n \\\"customer_id\\\" : \\\"d514d237b3ff5d98\\\",\\n \\\"carrier_name\\\" : \\\"Android\\\",\\n \\\"session_time_stamp\\\" : 1690529859742,\\n \\\"event_timestamp\\\" : 1690881966898,\\n \\\"session_id\\\" : \\\"0912105e-a4e3-46e6-9109-817c14c1c544ALFRED_SESSION_ID\\\"\\n },\\n \\\"session_upload_event_attributes\\\" : {\\n \\\"beginning_device_attributes\\\" : {\\n \\\"battery\\\" : 100,\\n \\\"storage\\\" : 797028350\\n },\\n \\\"end_device_attributes\\\" : {\\n \\\"battery\\\" : 100,\\n \\\"storage\\\" : 797016060,\\n \\\"memory\\\" : 48.128532\\n },\\n \\\"event_id\\\" : \\\"e238f0ac-c83e-4ee2-bd42-8a4e205b4d28ALFRED_EVENT_ID\\\"\\n }\\n }\""), + Topic: "Topic1", + Partition: 0, + Offset: 100, + } + messagesChan <- mockMessage1 + close(messagesChan) + go func() { + for msg := range messagesChan { + messagesBufferedChan <- msg + } + close(messagesBufferedChan) + }() + + ks := &SessionUploadListener{consumerGroupMock, elasticSearchClientMock} + consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once() + consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once() + elasticSearchClientMock.On("UploadSession", mock.Anything, mock.Anything, mock.Anything).Return(expectedError) + err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock) + + assert.Nil(t, err) + consumerGroupSessionMock.AssertExpectations(t) + consumerGroupClaimMock.AssertExpectations(t) + elasticSearchClientMock.AssertExpectations(t) + consumerGroupMock.AssertExpectations(t) + +} diff --git a/alfred/cmd/collector/listener/web_session_upload_listener.go b/alfred/cmd/collector/listener/web_session_upload_listener.go new file mode 100644 index 0000000..45b0059 --- /dev/null +++ 
b/alfred/cmd/collector/listener/web_session_upload_listener.go
@@ -0,0 +1,127 @@
+package listener
+
+import (
+	"alfred/config"
+	"alfred/internal/metrics"
+	"alfred/model/ingester"
+	"alfred/pkg/kafka"
+	"alfred/pkg/log"
+	"alfred/pkg/s3"
+	"alfred/repositoryAccessLayer"
+	"alfred/utils"
+	"encoding/json"
+	"fmt"
+	"github.com/Shopify/sarama"
+	"go.uber.org/zap"
+	"golang.org/x/sync/errgroup"
+	"os"
+	"path/filepath"
+)
+
+// WebSessionUploadListener consumes web-session upload events from Kafka,
+// offloads large session payloads to S3 and indexes the rest in Elasticsearch.
+type WebSessionUploadListener struct {
+	ConsumerGroup          sarama.ConsumerGroup
+	webSessionsAccessLayer repositoryAccessLayer.WebSessionsAccessLayer
+	s3Client               s3.S3Client
+	goroutineGroup         *errgroup.Group
+}
+
+func NewWebSessionUploadListener(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client) *WebSessionUploadListener {
+	sessionUploadListener, err := kafka.SaramaKafkaConsumer(
+		config.GetCollectorConfig().BaseConfig.Env,
+		config.GetCollectorConfig().KafkaConfig.BaseConfig,
+		config.GetCollectorConfig().KafkaConfig.WebSessionUploadTopicGroupId,
+	)
+
+	if err != nil {
+		log.Error("web session upload listener initialisation failed", zap.Error(err))
+		os.Exit(1)
+	}
+
+	group := new(errgroup.Group)
+	group.SetLimit(config.GetCollectorConfig().WebSessionUploadListenerGoroutineGroupLimit)
+
+	return &WebSessionUploadListener{
+		ConsumerGroup:          sessionUploadListener,
+		webSessionsAccessLayer: repositories.WebSessionsAccessLayer,
+		s3Client:               s3Client,
+		goroutineGroup:         group,
+	}
+}
+
+func (ks *WebSessionUploadListener) Setup(consumerGroupSession sarama.ConsumerGroupSession) error {
+	log.Info("setup web session upload kafka listener", zap.String("memberId", consumerGroupSession.MemberID()))
+	return nil
+}
+
+func (ks *WebSessionUploadListener) Cleanup(consumerGroupSession sarama.ConsumerGroupSession) error {
+	log.Info("cleanup web session upload kafka listener", zap.String("memberId", consumerGroupSession.MemberID()))
+	return nil
+}
+
+func (ks *WebSessionUploadListener) ConsumeClaim(consumerGroupSession sarama.ConsumerGroupSession, consumerGroupClaim sarama.ConsumerGroupClaim) error {
+	for message := range consumerGroupClaim.Messages() {
+		ks.processMessage(message)
+		consumerGroupSession.MarkMessage(message, utils.EMPTY)
+	}
+	return nil
+}
+
+func (ks *WebSessionUploadListener) processMessage(message *sarama.ConsumerMessage) {
+	ks.goroutineGroup.Go(func() error {
+		// NOTE(review): recover moved inside the goroutine — a deferred
+		// recover in the spawning function cannot catch panics raised in
+		// the spawned goroutine.
+		defer func() {
+			recover()
+		}()
+		var eventUploadRequest ingester.WebSessionUploadRequest
+		if err := json.Unmarshal(message.Value, &eventUploadRequest); err != nil {
+			metrics.KafkaEventConsumptionEventFailureCounter.WithLabelValues(message.Topic).Inc()
+			log.Error("json unmarshalling failed while ingesting web event upload data to elasticsearch", zap.Error(err))
+			return nil
+		}
+		ks.ingestWebSessionData(eventUploadRequest)
+		metrics.KafkaEventConsumptionEventSuccessCounter.WithLabelValues(message.Topic).Inc()
+		return nil
+	})
+}
+
+func (ks *WebSessionUploadListener) ingestWebSessionData(eventUploadRequest ingester.WebSessionUploadRequest) {
+	eventUploadRequest.CreatedAt = utils.GetCurrentTimeInMillis()
+	clientName := eventUploadRequest.BaseAttributes.ProjectName
+	webSessionUploadIndex := config.GetCollectorConfig().ElasticSearchConfig.WebSessionUploadIndexClientMap[clientName]
+	// len() on a nil slice is 0, so the separate nil check was redundant.
+	if len(eventUploadRequest.SessionAttributes.Data) > 0 {
+		uuidFileName := eventUploadRequest.SessionAttributes.EventId
+		filePath := filepath.Join(utils.TempDestinationFolder, uuidFileName)
+		data, err := json.Marshal(eventUploadRequest.SessionAttributes.Data)
+		if err != nil {
+			log.Error("error in marshaling web sessions", zap.Error(err))
+			return
+		}
+		err = s3.CreateFileFromByte(filePath, utils.GZExtension.String(), data)
+		if err != nil {
+			log.Error("error in creating file for web sessions", zap.Error(err))
+			return
+		}
+		defer ks.deleteWebSessionFile(filePath + utils.GZExtension.String())
+		webSessionUploadBucket := config.GetCollectorConfig().S3Config.WebSessionBucketClientMap[clientName]
+		_, err = ks.s3Client.UploadFile(webSessionUploadBucket,
+			utils.TempDestinationFolder, uuidFileName+utils.GZExtension.String(), uuidFileName+utils.GZExtension.String())
+		if err != nil {
+			log.Error("error in s3 upload of web sessions", zap.Error(err))
+			return
+		}
+
+		eventUploadRequest.SessionAttributes.Data = nil
+	}
+	err := ks.webSessionsAccessLayer.UploadWebSession(eventUploadRequest, webSessionUploadIndex)
+	if err != nil {
+		log.Error("error ingesting web sessions", zap.Error(err))
+		return
+	}
+}
+
+func (ks *WebSessionUploadListener) deleteWebSessionFile(fileName string) {
+	err := os.Remove(fileName)
+	if err != nil {
+		log.Error(fmt.Sprintf("not able to delete the file %s", fileName), zap.Error(err))
+	}
+}
diff --git a/alfred/cmd/collector/listener/web_session_upload_listener_test.go b/alfred/cmd/collector/listener/web_session_upload_listener_test.go
new file mode 100644
index 0000000..f8d6a28
--- /dev/null
+++ b/alfred/cmd/collector/listener/web_session_upload_listener_test.go
@@ -0,0 +1,77 @@
+package listener
+
+import (
+	"alfred/mocks"
+	"alfred/pkg/log"
+	"github.com/Shopify/sarama"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"testing"
+)
+
+func TestWebSessionUploadListener_Setup(t *testing.T) {
+
+	log.InitLogger()
+	consumerGroupSessionMock := &mocks.ConsumerGroupSession{}
+	testMemberID := "testMemberID"
+
+	ks := &WebSessionUploadListener{}
+	consumerGroupSessionMock.On("MemberID").Return(testMemberID)
+	err := ks.Setup(consumerGroupSessionMock)
+
+	assert.Nil(t, err, "err should be nil")
+	consumerGroupSessionMock.AssertExpectations(t)
+
+}
+
+func TestWebSessionUploadListener_Cleanup(t *testing.T) {
+
+	log.InitLogger()
+	consumerGroupSessionMock := &mocks.ConsumerGroupSession{}
+	testMemberID := "testMemberID"
+
+	ks := &WebSessionUploadListener{}
+	consumerGroupSessionMock.On("MemberID").Return(testMemberID)
+	err := 
ks.Cleanup(consumerGroupSessionMock)
+
+	assert.Nil(t, err, "err should be nil")
+	consumerGroupSessionMock.AssertExpectations(t)
+
+}
+
+func TestWebSessionUploadListener_ConsumeClaimWithoutSessions(t *testing.T) {
+
+	log.InitLogger()
+	elasticSearchClientMock := &mocks.MockElasticSearchClient{}
+	s3ClientMock := &mocks.MockS3Client{}
+	consumerGroupSessionMock := &mocks.ConsumerGroupSession{}
+	consumerGroupClaimMock := &mocks.ConsumerGroupClaim{}
+	consumerGroupMock := &mocks.MockConsumerGroup{}
+
+	messagesChan := make(chan *sarama.ConsumerMessage, 1)
+	messagesBufferedChan := make(chan *sarama.ConsumerMessage, 1)
+	mockMessage1 := &sarama.ConsumerMessage{
+		Value: []byte("Test Message 1"),
+	}
+	messagesChan <- mockMessage1
+	close(messagesChan)
+	go func() {
+		for msg := range messagesChan {
+			messagesBufferedChan <- msg
+		}
+		close(messagesBufferedChan)
+	}()
+
+	ks := &WebSessionUploadListener{consumerGroupMock, elasticSearchClientMock, s3ClientMock}
+	consumerGroupSessionMock.On("MarkMessage", mock.Anything, mock.Anything).Once()
+	consumerGroupClaimMock.On("Messages").Return((<-chan *sarama.ConsumerMessage)(messagesBufferedChan)).Once()
+	err := ks.ConsumeClaim(consumerGroupSessionMock, consumerGroupClaimMock)
+
+	assert.Nil(t, err)
+	consumerGroupSessionMock.AssertExpectations(t)
+	consumerGroupClaimMock.AssertExpectations(t)
+	consumerGroupMock.AssertExpectations(t)
+	elasticSearchClientMock.AssertExpectations(t)
+	s3ClientMock.AssertExpectations(t)
+
+}
diff --git a/alfred/cmd/collector/main.go b/alfred/cmd/collector/main.go
new file mode 100644
index 0000000..ca53e2a
--- /dev/null
+++ b/alfred/cmd/collector/main.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"alfred/cmd/collector/app"
+	"alfred/config"
+	"alfred/pkg/log"
+	"os"
+	"time"
+
+	ginzap "github.com/gin-contrib/zap"
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/cobra"
+	_ "go.uber.org/automaxprocs"
+	"go.uber.org/zap"
+)
+
+func main() {
+	log.InitLogger("alfred-collector")
+	config.LoadCollectorConfig()
+
+	command := &cobra.Command{
+		Use:   "alfred-collector",
+		Short: "alfred collector consumes events from kafka",
+		Long:  "alfred collector receive all app events and ingest them into kafka",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			r := gin.New()
+			r.Use(ginzap.Ginzap(log.GetLogger(), time.RFC3339, true))
+			r.Use(ginzap.RecoveryWithZap(log.GetLogger(), true))
+			sv := app.NewServer(r)
+			sv.Start()
+			return nil
+		},
+	}
+
+	if err := command.Execute(); err != nil {
+		log.Error("alfred collector main command execution failed", zap.Error(err))
+		os.Exit(1)
+	}
+}
diff --git a/alfred/cmd/core/app/external/customer_service.go b/alfred/cmd/core/app/external/customer_service.go
new file mode 100644
index 0000000..2e82873
--- /dev/null
+++ b/alfred/cmd/core/app/external/customer_service.go
@@ -0,0 +1,65 @@
+package external
+
+import (
+	"alfred/internal/clients"
+	"alfred/pkg/log"
+	"alfred/utils"
+	"errors"
+	"go.uber.org/zap"
+)
+
+type CustomerService struct {
+	customerFederationClient *clients.CustomerFederationClient
+	customerServiceClient    *clients.CustomerServiceClient
+}
+
+func NewCustomerService(httpClient *clients.HttpClient) *CustomerService {
+	return &CustomerService{
+		customerFederationClient: &clients.CustomerFederationClient{HttpClient: httpClient.HttpClient},
+		customerServiceClient:    &clients.CustomerServiceClient{HttpClient: httpClient.HttpClient},
+	}
+}
+
+func (s *CustomerService) GetCustomerRefId(externalId string) string {
+	if utils.ValidateId(externalId, utils.EMPTY) {
+		referenceId, err := s.customerServiceClient.GetCustomerRefId(externalId)
+		if err != nil {
+			log.Error("error getting customer reference id from external id", zap.Error(err))
+			return ""
+		}
+		return referenceId
+	}
+
+	return ""
+}
+
+func (s *CustomerService) GetDeviceIds(phoneNumber string, customerId string, deviceIdList []string) ([]string, error) {
+	var deviceIds []string
+	if len(deviceIdList) != 0 {
+		deviceIds = 
deviceIdList
+	} else if customerId != utils.EMPTY {
+		deviceIdsLocal, err := s.customerFederationClient.GetDeviceIdFromCustomerId(customerId)
+		if err != nil {
+			log.Error("device Id not found for customer id", zap.String("customerId", customerId), zap.Error(err))
+			return nil, err
+		}
+		if deviceIdsLocal == nil {
+			log.Error("device Id not found for customer id", zap.String("customerId", customerId))
+			return nil, errors.New("deviceIds not found for given customerId")
+		}
+		deviceIds = deviceIdsLocal
+	} else if phoneNumber != utils.EMPTY {
+		customerId, err := s.customerServiceClient.GetReferenceIdByPhoneNumber(phoneNumber)
+		if err != nil {
+			log.Error("customer Id not found for phone number", zap.String("customerId", customerId), zap.Error(err))
+			return nil, err
+		}
+		deviceIdsLocal, err := s.customerFederationClient.GetDeviceIdFromCustomerId(customerId)
+		if err != nil {
+			log.Error("device Id not found for customer id", zap.String("customerId", customerId), zap.Error(err))
+			return nil, err
+		}
+		// NOTE(review): a nil device list with a nil error previously returned
+		// (nil, nil); return a named error, mirroring the customerId branch.
+		if deviceIdsLocal == nil {
+			log.Error("device Id not found for customer id", zap.String("customerId", customerId))
+			return nil, errors.New("deviceIds not found for given customerId")
+		}
+		deviceIds = deviceIdsLocal
+	}
+	return deviceIds, nil
+}
diff --git a/alfred/cmd/core/app/external/data_science_service.go b/alfred/cmd/core/app/external/data_science_service.go
new file mode 100644
index 0000000..eca24b1
--- /dev/null
+++ b/alfred/cmd/core/app/external/data_science_service.go
@@ -0,0 +1,47 @@
+package external
+
+import (
+	"alfred/internal/clients"
+	"alfred/pkg/log"
+	"alfred/pkg/s3"
+	"alfred/utils"
+	"go.uber.org/zap"
+	"strings"
+)
+
+type DataScienceService struct {
+	dataScienceServiceClient *clients.DataScienceClient
+	s3Client                 s3.S3Client
+}
+
+func NewDataScienceService(httpClient *clients.HttpClient, s3Client s3.S3Client) *DataScienceService {
+	return &DataScienceService{
+		dataScienceServiceClient: &clients.DataScienceClient{HttpClient: httpClient.HttpClient},
+		s3Client:                 s3Client,
+	}
+}
+
+// MaskImages generates pre-signed S3 download/upload URLs for the zip file and
+// asks the data-science service to mask the images it contains.
+func (ds *DataScienceService) MaskImages(screen, sessionUploadBucket, toBeMaskedFileName, maskedFileName string) (bool, error) {
+	presignedDownloadUrl, err := ds.s3Client.PresignedDownloadUrl(sessionUploadBucket, toBeMaskedFileName+utils.ZipExtension.String(), toBeMaskedFileName+utils.ZipExtension.String())
+	if err != nil {
+		log.Error("Error occurred while generating presigned url for download in data science service", zap.Error(err), zap.String("toBeMaskedFileName", toBeMaskedFileName))
+		return false, err
+	}
+	presignedUploadUrl, err := ds.s3Client.PreSignedUploadUrl(sessionUploadBucket, maskedFileName+utils.ZipExtension.String(), utils.ZipExtension.String(), utils.ZipContentType)
+	if err != nil {
+		log.Error("Error occurred while generating presigned url for upload in data science service", zap.Error(err), zap.String("maskedFileName", maskedFileName))
+		return false, err
+	}
+	return ds.dataScienceServiceClient.MaskImages(screen, presignedDownloadUrl, presignedUploadUrl)
+}
+
+// ReplaceOriginalImagesWithDsMaskedImages downloads and unzips the masked file
+// into the folder holding the original images, replacing them with the masked
+// versions.
+func (ds *DataScienceService) ReplaceOriginalImagesWithDsMaskedImages(sessionUploadBucket, maskedFileName, pathToUnzippedFiles string) (bool, error) {
+	_, err := ds.s3Client.DownloadAndUnzipFile(sessionUploadBucket, utils.TempDestinationFolder, maskedFileName+utils.ZipExtension.String(), maskedFileName+utils.ZipExtension.String(), strings.TrimPrefix(pathToUnzippedFiles, utils.TempDestinationFolder+utils.FORWARD_SLASH))
+	if err != nil {
+		log.Error("Error occurred while downloading and unzipping the masked file in ds mask strategy", zap.String("maskedFileName", maskedFileName), zap.Error(err))
+		return false, err
+	}
+	return true, nil
+}
diff --git a/alfred/cmd/core/app/factory/AppFactory.go b/alfred/cmd/core/app/factory/AppFactory.go
new file mode 100644
index 0000000..88a88b0
--- /dev/null
+++ b/alfred/cmd/core/app/factory/AppFactory.go
@@ -0,0 +1,12 @@
+package factory
+
+import (
+	
"alfred/cmd/core/app/service/interfaces" + "alfred/internal/clients" + "alfred/repositoryAccessLayer" +) + +type AppClientFactory interface { + Initialize(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) + CreateAppClient(client string) (interfaces.AppClient, error) +} diff --git a/alfred/cmd/core/app/factory/CosmosAndroidAppSessionFactory.go b/alfred/cmd/core/app/factory/CosmosAndroidAppSessionFactory.go new file mode 100644 index 0000000..ee540fe --- /dev/null +++ b/alfred/cmd/core/app/factory/CosmosAndroidAppSessionFactory.go @@ -0,0 +1,32 @@ +package factory + +import ( + "alfred/cmd/core/app/service" + "alfred/cmd/core/app/service/interfaces" + "alfred/internal/clients" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type CosmosAndroidAppSessionFactory struct { + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient +} + +func (f *CosmosAndroidAppSessionFactory) CreateAppClient(client string) (interfaces.AppClient, error) { + if client == utils.COSMOS { + return f.CreateNewCosmosAndroidAppClient(), nil + } + return nil, errors.New("invalid client name for CosmosAndroidAppSessionFactory") +} + +func (f *CosmosAndroidAppSessionFactory) Initialize(repositories *repositoryAccessLayer.RepositoryAccessLayer, + httpClient *clients.HttpClient) { + f.Repositories = repositories + f.HttpClient = httpClient +} + +func (f *CosmosAndroidAppSessionFactory) CreateNewCosmosAndroidAppClient() *service.AppSessionCosmos { + return service.NewAppSessionCosmos(f.Repositories) +} diff --git a/alfred/cmd/core/app/factory/NaviAndroidAppSessionFactory.go b/alfred/cmd/core/app/factory/NaviAndroidAppSessionFactory.go new file mode 100644 index 0000000..99115f3 --- /dev/null +++ b/alfred/cmd/core/app/factory/NaviAndroidAppSessionFactory.go @@ -0,0 +1,31 @@ +package factory + +import ( + "alfred/cmd/core/app/service" + "alfred/cmd/core/app/service/interfaces" + "alfred/internal/clients" + 
"alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type NaviAndroidAppSessionFactory struct { + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient +} + +func (f *NaviAndroidAppSessionFactory) CreateAppClient(client string) (interfaces.AppClient, error) { + if client == utils.NAVI_USER_APP { + return f.CreateNewNaviAndroidAppClient(), nil + } + return nil, errors.New("invalid client name for NaviAndroidAppSessionFactory") +} + +func (f *NaviAndroidAppSessionFactory) Initialize(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) { + f.Repositories = repositories + f.HttpClient = httpClient +} + +func (f *NaviAndroidAppSessionFactory) CreateNewNaviAndroidAppClient() *service.AppSessionNaviApp { + return service.NewAppSessionNaviApp(f.Repositories, f.HttpClient) +} diff --git a/alfred/cmd/core/app/factory/NaviIosAppSessionFactory.go b/alfred/cmd/core/app/factory/NaviIosAppSessionFactory.go new file mode 100644 index 0000000..abaccc6 --- /dev/null +++ b/alfred/cmd/core/app/factory/NaviIosAppSessionFactory.go @@ -0,0 +1,32 @@ +package factory + +import ( + "alfred/cmd/core/app/service" + "alfred/cmd/core/app/service/interfaces" + "alfred/internal/clients" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type NaviIosAppSessionFactory struct { + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient +} + +func (f *NaviIosAppSessionFactory) CreateAppClient(client string) (interfaces.AppClient, error) { + if client == utils.NAVI_USER_APP_IOS { + return f.CreateNewNaviIosAppClient(), nil + } + return nil, errors.New("invalid client name for NaviIosAppSessionFactory") + +} + +func (f *NaviIosAppSessionFactory) Initialize(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) { + f.Repositories = repositories + f.HttpClient = httpClient +} + +func (f *NaviIosAppSessionFactory) 
CreateNewNaviIosAppClient() *service.AppSessionNaviAppIos { + return service.NewAppSessionNaviAppIos(f.Repositories, f.HttpClient) +} diff --git a/alfred/cmd/core/app/handler/app_client_manager.go b/alfred/cmd/core/app/handler/app_client_manager.go new file mode 100644 index 0000000..99c40c5 --- /dev/null +++ b/alfred/cmd/core/app/handler/app_client_manager.go @@ -0,0 +1,45 @@ +package handler + +import ( + "alfred/cmd/core/app/factory" + "alfred/cmd/core/app/service/interfaces" + "alfred/internal/clients" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type AppClientManager interface { + GetAppClientByName(clientName string) (interfaces.AppClient, error) +} + +type AppClientManagerImpl struct { + AppClientFactories map[string]factory.AppClientFactory + HttpClient *clients.HttpClient + Repositories *repositoryAccessLayer.RepositoryAccessLayer +} + +func NewAppClientManagerImpl(httpClient *clients.HttpClient, + repositories *repositoryAccessLayer.RepositoryAccessLayer) *AppClientManagerImpl { + AppClientFactories := map[string]factory.AppClientFactory{ + utils.NAVI_USER_APP_IOS: &factory.NaviIosAppSessionFactory{}, + utils.NAVI_USER_APP: &factory.NaviAndroidAppSessionFactory{}, + utils.COSMOS: &factory.CosmosAndroidAppSessionFactory{}, + } + return &AppClientManagerImpl{ + AppClientFactories: AppClientFactories, + HttpClient: httpClient, + Repositories: repositories, + } +} + +func (acm *AppClientManagerImpl) GetAppClientByName(clientName string) (interfaces.AppClient, error) { + appClientFactory, found := acm.AppClientFactories[clientName] + if !found { + log.Error("invalid client name: " + clientName) + return nil, errors.New("invalid client name: " + clientName) + } + appClientFactory.Initialize(acm.Repositories, acm.HttpClient) + return appClientFactory.CreateAppClient(clientName) +} diff --git a/alfred/cmd/core/app/handler/app_session_handler.go b/alfred/cmd/core/app/handler/app_session_handler.go new file mode 100644 index 
0000000..da47605
--- /dev/null
+++ b/alfred/cmd/core/app/handler/app_session_handler.go
@@ -0,0 +1,330 @@
+package handler
+
+import (
+	"alfred/api/request"
+	"alfred/api/response"
+	"alfred/cmd/core/app/helper"
+	"alfred/config"
+	"alfred/internal/clients"
+	"alfred/model/common"
+	"alfred/model/es"
+	"alfred/model/ingester"
+	"alfred/pkg/log"
+	"alfred/pkg/s3"
+	"alfred/repositoryAccessLayer"
+	"alfred/utils"
+	"errors"
+	"fmt"
+	"github.com/gin-gonic/gin"
+	"github.com/u2takey/go-utils/slice"
+	"go.uber.org/zap"
+	"math"
+	"net/http"
+	"strings"
+	"time"
+)
+
+type AppSessionHandler struct {
+	eventsAccessLayer  repositoryAccessLayer.EventsAccessLayer
+	sessionAccessLayer repositoryAccessLayer.SessionsAccessLayer
+	s3Client           s3.S3Client
+	appClientManager   AppClientManager
+}
+
+func NewAppSessionHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *AppSessionHandler {
+	return &AppSessionHandler{
+		eventsAccessLayer:  repositories.EventsAccessLayer,
+		sessionAccessLayer: repositories.SessionsAccessLayer,
+		s3Client:           s3Client,
+		appClientManager:   NewAppClientManagerImpl(httpClient, repositories),
+	}
+}
+
+func (s *AppSessionHandler) FetchAppSessions(c *gin.Context) {
+	customerId := c.Query("customer_id")
+	deviceId := c.Query("device_id")
+	phoneNumber := c.Query("phone_number")
+	sessionId := c.Query("session_id")
+	labelFilters := c.Query("labels")
+	appName := c.Query("app_version_name")
+	screenName := c.Query("screen_name")
+	fragmentNames := c.Query("fragment_name")
+	vertical := c.Query("vertical")
+	appVersion := c.Query("app_version_code")
+	screenTag := c.Query("screen_tag")
+	codePushVersion := c.Query("code_push_version")
+	agentEmailId := c.Query("agent_email_id")
+	snapshotPerSecond := c.Query("snapshot_per_second")
+	sortBy := helper.AppSortingMapper(c.Query("sort_by"))
+
+	clientName, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		log.Error("invalid api key", zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+	// NOTE(review): appOs lookup moved after the error check — clientName is
+	// not trustworthy until ValidateAPIKeyHeaders has succeeded.
+	appOs := config.GetCoreConfig().ClientAppOsMap[clientName]
+
+	clientHandler, err := s.appClientManager.GetAppClientByName(clientName)
+	if err != nil {
+		// NOTE(review): was log-only; a nil clientHandler would panic below.
+		log.Error("no client handler available for ", zap.String("client", clientName), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[clientName]
+
+	eventIngestionIndex := config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[clientName]
+
+	startTimestamp, endTimestamp, err := utils.ValidateTimestamps(c.Query("start_time"), c.Query("end_time"))
+	if err != nil {
+		log.Error("error in query parameters", zap.String("customerId", customerId),
+			zap.String("deviceId", deviceId), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	// Clamp the query window to the configured maximum number of hours.
+	maxNumHours := config.GetCoreConfig().DefaultSessionTime
+	if endTimestamp-((maxNumHours * time.Hour).Milliseconds()) > startTimestamp {
+		endTimestamp = utils.GetCurrentTimeInMillis()
+		startTimestamp = endTimestamp - (maxNumHours * time.Hour).Milliseconds()
+	}
+
+	pageSize, pageNumber, sortDirection, err := utils.ValidatePage(c.Query("page_size"), c.Query("page_number"), c.Query("sort_direction"))
+	if err != nil {
+		log.Error("error in query parameters", zap.String("customerId", customerId),
+			zap.String("deviceId", deviceId), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	page := es.Page{
+		PageSize:      pageSize,
+		PageNumber:    pageNumber,
+		SortDirection: es.SortDirection(sortDirection),
+	}
+
+	// fetch sessions matching the query filters
+	sessionRequest := request.SessionFilters{
+		DeviceId:          deviceId,
+		CustomerId:        customerId,
+		PhoneNumber:       phoneNumber,
+		SessionId:         sessionId,
+		StartTimestamp:    startTimestamp,
+		EndTimestamp:      endTimestamp,
+		Labels:            labelFilters,
+		AppName:           appName,
+		ScreenName:        screenName,
+		FragmentNames:     fragmentNames,
+		Vertical:          vertical,
+		AppVersion:        appVersion,
+		ScreenTags:        screenTag,
+		CodePushVersion:   codePushVersion,
+		AgentEmailId:      agentEmailId,
+		SnapshotPerSecond: snapshotPerSecond,
+		SortBy:            sortBy,
+		AppOs:             appOs,
+	}
+	var sessions []es.SessionResponse
+
+	sessions, err = clientHandler.FetchSessionDetails(sessionRequest, &page, sessionUploadIndex, eventIngestionIndex)
+
+	if err != nil {
+		log.Error("could not find any session for given inputs",
+			zap.String("customerId", customerId), zap.Error(err))
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{
+			DeviceId: deviceId, CustomerId: customerId, PhoneNumber: phoneNumber}),
+		)
+		return
+	}
+	log.Info(fmt.Sprintf("sessions found"), zap.String("customerId", customerId), zap.String("phoneNumber", phoneNumber), zap.String("deviceId", deviceId))
+
+	var sessionResponse []response.SearchSessionResponseData
+
+	sessionResponse, err = clientHandler.CreateBucketsForSessionForResponse(sessions)
+	// NOTE(review): error checked immediately — the original checked it only
+	// after the success response had already been written, which could emit a
+	// second response body.
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{
+			DeviceId: deviceId, CustomerId: customerId, PhoneNumber: phoneNumber}),
+		)
+		return
+	}
+
+	if customerId != utils.EMPTY {
+		sessionResponse = s.filterSessionsForCustomerId(sessionResponse, customerId)
+	}
+
+	var genericResponse []common.Response
+	for _, session := range sessionResponse {
+		labels, touchCounts := s.fetchLabelsForSession(session.BaseAttributesDTO.SessionId, labelFilters, eventIngestionIndex, false)
+
+		var touchCountResponse int64
+		if touchCounts != nil {
+			touchCountResponse = *touchCounts
+		}
+		genericResponse = utils.AddDataToResponse(response.SearchSessionResponseData{
+			DeviceAttributes:  session.DeviceAttributes,
+			BaseAttributesDTO: session.BaseAttributesDTO,
+			Labels:            labels,
+			Metadata:          session.Metadata,
+			TouchCounts:       touchCountResponse,
+			CreatedAt:         session.CreatedAt,
+		}, http.StatusOK, genericResponse)
+	}
+	// Map iteration order is nondeterministic, so the response must be sorted
+	// explicitly.
+	if page.SortDirection == es.ASC {
+		utils.SortAscSession(&genericResponse)
+	} else {
+		utils.SortSession(&genericResponse)
+	}
+
+	if genericResponse == nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(errors.New(utils.NO_SESSION_FOUND), http.StatusInternalServerError, nil))
+		// NOTE(review): return added — the original fell through and wrote a
+		// second (207) response after the error response.
+		return
+	}
+
+	c.JSON(http.StatusMultiStatus, utils.SuccessPaginatedResponse(genericResponse, common.Page{
+		PageSize:      len(genericResponse),
+		TotalPages:    int64(math.Ceil(float64(page.TotalSize) / float64(page.PageSize))),
+		PageNumber:    pageNumber,
+		TotalElements: page.TotalSize,
+	}, http.StatusMultiStatus))
+}
+
+func (s *AppSessionHandler) FetchAppSessionDetails(c *gin.Context) {
+	sessionId := c.Query("session_id")
+
+	clientName, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		log.Error("invalid api key", zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	clientHandler, err := s.appClientManager.GetAppClientByName(clientName)
+	if err != nil {
+		// NOTE(review): was log-only; a nil clientHandler would panic below.
+		log.Error("no client handler available for ", zap.String("client", clientName), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[clientName]
+	eventIngestionIndex := config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[clientName]
+
+	var sessions 
[]es.SessionResponse
+
+	page := es.Page{
+		PageSize:      utils.SessionUpperLimit,
+		PageNumber:    0,
+		SortDirection: es.SortDirection(common.ASC),
+	}
+
+	sessions, err = s.sessionAccessLayer.FetchSessionsWithSessionIds([]string{sessionId}, &page, sessionUploadIndex)
+
+	if err != nil {
+		log.Error("could not find any session for given inputs", zap.String("sessionId", sessionId), zap.Error(err))
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{
+			SessionId: sessionId}),
+		)
+		return
+	}
+	log.Info(fmt.Sprintf("sessions found"), zap.String("sessionId", sessionId))
+
+	var sessionResponse []response.SearchSessionResponseData
+	sessionResponse, err = clientHandler.CreateBucketsForSessionForResponse(sessions)
+	// NOTE(review): error checked immediately — the original checked it only
+	// after the success response had already been written.
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{
+			SessionId: sessionId}),
+		)
+		return
+	}
+
+	var genericResponse []common.Response
+	for _, session := range sessionResponse {
+		labels, touchCounts := s.fetchLabelsForSession(session.BaseAttributesDTO.SessionId, utils.EMPTY, eventIngestionIndex, true)
+
+		var touchCountResponse int64
+		if touchCounts != nil {
+			touchCountResponse = *touchCounts
+		}
+
+		genericResponse = utils.AddDataToResponse(response.SearchSessionResponseData{
+			DeviceAttributes:  session.DeviceAttributes,
+			BaseAttributesDTO: session.BaseAttributesDTO,
+			Labels:            labels,
+			Metadata:          session.Metadata,
+			TouchCounts:       touchCountResponse,
+		}, http.StatusOK, genericResponse)
+	}
+
+	if genericResponse == nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(errors.New(utils.NO_SESSION_FOUND), http.StatusInternalServerError, nil))
+		// NOTE(review): return added to avoid writing a second response below.
+		return
+	}
+
+	c.JSON(http.StatusMultiStatus, utils.SuccessResponse(genericResponse, http.StatusOK))
+}
+
+// fetchLabelsForSession returns the distinct event labels found for a session
+// (restricted to validLabels) and, optionally, the session's touch count.
+func (s *AppSessionHandler) fetchLabelsForSession(sessionId, labelFilters string, eventIngestionIndex string, fetchTouchCounts bool) ([]string, *int64) {
+
+	validLabels := strings.Split(labelFilters, utils.COMMA)
+	if labelFilters == "" {
+		validLabels = []string{ingester.ERROR_LOG, ingester.CRASH_ANALYTICS_EVENT, ingester.ANR_EVENT}
+	}
+	res, touchCounts, err := s.eventsAccessLayer.FetchEventsFromSessionId(sessionId, &es.Page{}, eventIngestionIndex, fetchTouchCounts)
+	if err != nil {
+		log.Error("No data for sessionId", zap.String("sessionId", sessionId), zap.Error(err))
+		return nil, nil
+	}
+	var labels []string
+	for _, esResponse := range res {
+		esResponseLocal := esResponse
+		if helper.IsValidLabel(esResponseLocal.Source.EventAttributes.EventName, validLabels) &&
+			!slice.ContainsString(labels, esResponseLocal.Source.EventAttributes.EventName, nil) {
+			labels = append(labels, esResponseLocal.Source.EventAttributes.EventName)
+		}
+	}
+	return labels, touchCounts
+}
+
+// filterSessionsForCustomerId keeps only the sessions belonging to customerId;
+// if none match, the full input is returned unchanged (existing behaviour).
+func (s *AppSessionHandler) filterSessionsForCustomerId(sessions []response.SearchSessionResponseData, customerId string) []response.SearchSessionResponseData {
+	var customerSessions []response.SearchSessionResponseData
+	for _, session := range sessions {
+		if customerId == session.BaseAttributesDTO.CustomerId {
+			customerSessions = append(customerSessions, session)
+		}
+	}
+	if len(customerSessions) == 0 {
+		return sessions
+	}
+	return customerSessions
+}
+
+func (s *AppSessionHandler) FetchEvents(c *gin.Context) {
+	sessionId := c.GetHeader("session-id")
+	client, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	eventIngestionIndex := config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[client]
+
+	page := es.Page{
+		PageSize:      30,
+		PageNumber:    0,
+		SortDirection: "desc",
+	}
+	if len(sessionId) == 0 {
+		err := errors.New("sessionId is required in headers")
+		log.Error("Bad Request", zap.String("sessionId", sessionId), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	res, _, err := s.eventsAccessLayer.FetchEventsFromSessionId(sessionId, &page, eventIngestionIndex, true)
+	if err != nil {
+		log.Error("Bad Request", zap.String("sessionId", sessionId), zap.Error(err))
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+	c.JSON(http.StatusOK, utils.SuccessResponse(res, http.StatusOK))
+}
diff --git a/alfred/cmd/core/app/handler/cruise_control_handler.go b/alfred/cmd/core/app/handler/cruise_control_handler.go
new file mode 100644
index 0000000..11ab790
--- /dev/null
+++ b/alfred/cmd/core/app/handler/cruise_control_handler.go
@@ -0,0 +1,161 @@
+package handler
+
+import (
+	"alfred/api/response"
+	"alfred/cmd/core/app/helper"
+	"alfred/config"
+	"alfred/internal/clients"
+	"alfred/model/core/cruise"
+	"alfred/pkg/log"
+	"alfred/repositoryAccessLayer"
+	"alfred/utils"
+	"encoding/json"
+	"fmt"
+	"github.com/gin-gonic/gin"
+	"go.uber.org/zap"
+	"net/http"
+	"strings"
+)
+
+type CruiseControlHandler struct {
+	cruiseControlAccessLayer repositoryAccessLayer.CruiseControlAccessLayer
+	alfredIngestorClient     *clients.AlfredIngestorClient
+}
+
+func NewCruiseControlHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, ingestorClient *clients.AlfredIngestorClient) *CruiseControlHandler {
+	return &CruiseControlHandler{
+		cruiseControlAccessLayer: repositories.CruiseControlAccessLayer,
+		alfredIngestorClient:     ingestorClient,
+	}
+}
+
+func (cc *CruiseControlHandler) FetchCruiseControlConfig(c *gin.Context) {
+	appVersionName := c.Query("appVersionName")
+	client, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+	appOs := strings.ToLower(config.GetCoreConfig().ClientAppOsMap[client])
+	cruiseControlIndex := config.GetCoreConfig().ElasticSearchConfig.CruiseControlIndexClientMap[client]
+	response, err := 
cc.cruiseControlAccessLayer.FetchCruiseControlConfig(appVersionName, appOs, cruiseControlIndex)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+	c.JSON(http.StatusOK, utils.SuccessResponse(response.Hits.Hits, http.StatusOK))
+}
+
+func (cc *CruiseControlHandler) CreateCruiseControlConfig(c *gin.Context) {
+
+	client, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	var cruiseControlRequest cruise.ControlConfig
+
+	if err := c.ShouldBindJSON(&cruiseControlRequest); err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+
+	// NOTE(review): session token removed from the log line — credentials
+	// must not be written to logs.
+	log.Info("create cruise config", zap.String("body", fmt.Sprintf("%v", cruiseControlRequest)),
+		zap.String("email", c.GetHeader("X-User-Email")))
+
+	appOs := strings.ToLower(config.GetCoreConfig().ClientAppOsMap[client])
+
+	// TODO: remove once the UI sends the correct os type.
+	cruiseControlRequest.Type = cruise.OsType(appOs)
+
+	cruiseControlIndex := config.GetCoreConfig().ElasticSearchConfig.CruiseControlIndexClientMap[client]
+	updateCheck, err := cc.cruiseControlAccessLayer.FetchCruiseControlConfig(cruiseControlRequest.OsConfig.AppVersion, appOs, cruiseControlIndex)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+	// NOTE(review): unchecked type assertion replaced — a missing or
+	// non-float "config_time" would have panicked the handler.
+	if len(updateCheck.Hits.Hits) == 0 {
+		cruiseControlRequest.ConfigTime = utils.GetCurrentTimeInMillis()
+	} else if configTime, ok := updateCheck.Hits.Hits[0].Source["config_time"].(float64); ok {
+		cruiseControlRequest.ConfigTime = int64(configTime)
+	} else {
+		cruiseControlRequest.ConfigTime = utils.GetCurrentTimeInMillis()
+	}
+
+	err = cc.cruiseControlAccessLayer.CreateCruiseControlConfig(&cruiseControlRequest, cruiseControlIndex)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+
+	_, err = cc.alfredIngestorClient.InvalidateCache(cruiseControlRequest.OsConfig.AppVersion, client)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+
+	c.JSON(http.StatusOK, utils.SuccessResponse(nil, http.StatusOK))
+}
+
+func (cc *CruiseControlHandler) FetchAllCruiseControlConfigAppVersions(c *gin.Context) {
+
+	client, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	appOs := strings.ToLower(config.GetCoreConfig().ClientAppOsMap[client])
+	cruiseControlIndex := config.GetCoreConfig().ElasticSearchConfig.CruiseControlIndexClientMap[client]
+
+	values, err := cc.cruiseControlAccessLayer.FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+
+	res := helper.MapToFilterData(values)
+	c.JSON(http.StatusOK, utils.SuccessResponse(res, http.StatusOK))
+}
+
+func (cc *CruiseControlHandler) FetchDropdowns(c *gin.Context) {
+
+	client, err := helper.ValidateAPIKeyHeaders(c)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil))
+		return
+	}
+
+	dropdown := cc.getDropdownResponse()
+
+	appOs := strings.ToLower(config.GetCoreConfig().ClientAppOsMap[client])
+	cruiseControlIndex := config.GetCoreConfig().ElasticSearchConfig.CruiseControlIndexClientMap[client]
+
+	values, err := cc.cruiseControlAccessLayer.FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil))
+		return
+	}
+
+	dropdown = append(dropdown, response.DropdownResponseData{
+		DropdownKey: "app_versions",
+		FilterData:  helper.MapToFilterData(values),
+		
SelectionConfig: response.SINGLE_SELECT, + }) + c.JSON(http.StatusOK, utils.SuccessResponse(dropdown, http.StatusOK)) + + return +} + +func (cc *CruiseControlHandler) getDropdownResponse() []response.DropdownResponseData { + + dropDownConfig := config.GetCoreConfig().CruiseDropdowns + + var configMap []response.DropdownResponseData + + err := json.Unmarshal([]byte(dropDownConfig), &configMap) + if err != nil { + return []response.DropdownResponseData{} + } + + return configMap +} diff --git a/alfred/cmd/core/app/handler/error_events_handler.go b/alfred/cmd/core/app/handler/error_events_handler.go new file mode 100644 index 0000000..37e7388 --- /dev/null +++ b/alfred/cmd/core/app/handler/error_events_handler.go @@ -0,0 +1,44 @@ +package handler + +import ( + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" + "net/http" +) + +type ErrorEventsHandler interface { +} + +type ErrorEventsHandlerImpl struct { + errorEventsAccessLayer repositoryAccessLayer.ErrorEventsAccessLayer +} + +func NewErrorEventsHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer) *ErrorEventsHandlerImpl { + return &ErrorEventsHandlerImpl{ + errorEventsAccessLayer: repositories.ErrorEventsAccessLayer, + } +} + +func (s *ErrorEventsHandlerImpl) FetchErrorEvents(c *gin.Context) { + sessionId := c.Query(utils.SESSION_ID) + clientName, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + errorEventsIndex := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUploadIndexClientMap[clientName] + errorEventsResponse, err := s.errorEventsAccessLayer.FetchErrorEvents(sessionId, errorEventsIndex) + if err != nil { + if err == errors.New(utils.NO_SESSION_FOUND) { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + c.JSON(http.StatusInternalServerError, 
utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + c.JSON(http.StatusOK, utils.SuccessResponse(errorEventsResponse, http.StatusOK)) +} diff --git a/alfred/cmd/core/app/handler/filter_handler.go b/alfred/cmd/core/app/handler/filter_handler.go new file mode 100644 index 0000000..8e06076 --- /dev/null +++ b/alfred/cmd/core/app/handler/filter_handler.go @@ -0,0 +1,59 @@ +package handler + +import ( + "alfred/cmd/core/app/helper" + "alfred/cmd/core/app/service" + "alfred/config" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "net/http" +) + +type FilterConfig struct { + filterNaviService *service.FilterNavi + filterCosmosService *service.FilterCosmos + filterNaviAppIosService *service.FilterNaviAppIos + filterWebService service.FilterWeb +} + +func NewFilterConfigHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer) *FilterConfig { + return &FilterConfig{ + filterNaviService: service.NewFilterNavi(repositories), + filterCosmosService: service.NewFilterCosmos(repositories), + filterNaviAppIosService: service.NewFilterNaviAppIos(repositories), + filterWebService: service.NewFilterWeb(), + } +} + +func (fc *FilterConfig) FetchFilterConfig(c *gin.Context) { + + client, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + eventIngestionIndex := config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[client] + + switch client { + case utils.NAVI_USER_APP: + c.JSON(http.StatusOK, utils.SuccessResponse(fc.filterNaviService.CreateFilterResponse(eventIngestionIndex), http.StatusOK)) + case utils.COSMOS: + c.JSON(http.StatusOK, utils.SuccessResponse(fc.filterCosmosService.CreateFilterResponse(eventIngestionIndex), http.StatusOK)) + case utils.NAVI_USER_APP_IOS: + c.JSON(http.StatusOK, 
utils.SuccessResponse(fc.filterNaviAppIosService.CreateFilterResponse(eventIngestionIndex), http.StatusOK)) + } +} + +func (fc *FilterConfig) FetchWebFilters(c *gin.Context) { + _, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + log.Error(utils.INVALID_WEB_CLIENT, zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + c.JSON(http.StatusOK, utils.SuccessResponse(fc.filterWebService.CreateFilterResponse(), http.StatusOK)) +} diff --git a/alfred/cmd/core/app/handler/media_handler.go b/alfred/cmd/core/app/handler/media_handler.go new file mode 100644 index 0000000..6a56db5 --- /dev/null +++ b/alfred/cmd/core/app/handler/media_handler.go @@ -0,0 +1,913 @@ +package handler + +import ( + "alfred/api/response" + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/internal/clients" + "alfred/internal/metrics" + "alfred/mapper" + "alfred/model/common" + "alfred/model/core" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/ffmpeg" + "alfred/pkg/limiter" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/utils" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "go.uber.org/zap" + "math" + "net/http" + "path/filepath" + "sort" + "strings" + "time" +) + +type MediaHandler struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + videoGenerationStatusAccessLayer repositoryAccessLayer.VideoGenerationStatusAccessLayer + s3Client s3.S3Client + customerService *external.CustomerService + zipProcessor *ZipProcessor +} + +func NewMediaHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *MediaHandler { + return &MediaHandler{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + eventsAccessLayer: repositories.EventsAccessLayer, + 
s3Client: s3Client, + customerService: external.NewCustomerService(httpClient), + videoGenerationStatusAccessLayer: repositories.VideoGenerationStatusAccessLayer, + zipProcessor: NewZipProcessor(repositories, s3Client, httpClient), + } +} + +func (m *MediaHandler) RequestVideo(c *gin.Context) { + + sessionId := c.Query(utils.SESSION_ID) + requesterEmailId := c.GetHeader(utils.USER_EMAIL_HEADER) + + client, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[client] + eventIngestionIndex := config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[client] + sessionUploadBucket := config.GetCoreConfig().S3Config.SessionUploadBucketClientMap[client] + videoUploadBucket := config.GetCoreConfig().S3Config.VideoUploadBucketClientMap[client] + videoGenerationStatusIndexClientMap := config.GetCoreConfig().ElasticSearchConfig.VideoGenerationStatusIndexClientMap + + videoGenerationStatusIndex := videoGenerationStatusIndexClientMap[client] + + log.Info("Video generation request received", zap.String(utils.SESSION_ID, sessionId), zap.String("email", requesterEmailId), + zap.String("X-Session-Token", c.GetHeader("sessionToken"))) + + page := es.Page{ + PageSize: utils.EsUpperLimit, + PageNumber: 0, + SortDirection: es.DESC, + } + + sessionResponse, err := m.sessionsAccessLayer.FetchSessionsWithSessionIds([]string{sessionId}, &page, sessionUploadIndex) + if err != nil { + log.Error("could not find any session for given inputs", zap.String(utils.SESSION_ID, sessionId), zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{SessionId: sessionId})) + return + } + imageType := utils.DOT + sessionResponse[0].Source.BaseAttributes.ImageType + fileTypeExtension := 
sessionResponse[0].Source.BaseAttributes.FileTypeExtension + + eventBucketsPerSession, deviceIdForSession, esResponsePerSession, snapshotPerSecond := helper.CreateBucketsForSession(sessionResponse) + + var genericResponse []common.Response + countForSuccessResponse := 0 + for sessionId, events := range *eventBucketsPerSession { + if len(events) != 0 { + deviceId, _ := (*deviceIdForSession)[sessionId] + esResponse, _ := (*esResponsePerSession)[sessionId] + indexName := esResponse[0].Index + appVersionCode := esResponse[0].Source.AppVersionCode + + videoGenerationStatus, err := m.videoGenerationStatusAccessLayer.FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex) + if err != nil { + log.Error("Error occurred while fetching video generation status", zap.String("sessionId", sessionId), zap.Error(err)) + return + } + + result, err := mapper.MapESApiResponseToESResponse(videoGenerationStatus) + if err != nil { + log.Error("Error occurred while mapping videoGenerationStatus response", zap.String("sessionId", sessionId), zap.Error(err)) + return + } + // Check if the video request is being done for the first time + if len(result.Hits.Hits) == 0 { + // do asyncDSMasking + //go func() { + // err := m.asyncDSMasking(sessionId, events, sessionUploadBucket, eventIngestionIndex, videoGenerationStatusIndex, client, esResponse) + // if err != nil { + // log.Error("Error occurred while applying asyncDSMasking", zap.String("sessionId", sessionId), zap.Error(err)) + // } + //}() + + // Register video generation entity for first time + m.createVideoGenerationStatusEntity(sessionId, videoGenerationStatusIndex, requesterEmailId, events, 0, 0, es.VideoGenerationStatusResponse{}) + + // Download and unzip files of the first zip of the session + generatedUuid, _ := uuid.NewUUID() + uuidSessionId := sessionId + utils.HYPHEN + generatedUuid.String() + pathToUnzippedFiles, err := m.s3Client.DownloadAndUnzipFileWithExtension(sessionUploadBucket, + utils.TempDestinationFolder, 
events[0]+fileTypeExtension, events[0]+fileTypeExtension, uuidSessionId, fileTypeExtension) + + // Process the zips + processedFilesPath, isProcessed, err := m.zipProcessor.ProcessZip(pathToUnzippedFiles, sessionId, eventIngestionIndex, indexName, client, appVersionCode, events[0], imageType, false) + if err != nil { + log.Error("Error occurred while processing zips", zap.String("sessionId", sessionId), zap.Error(err)) + } + + // In case the zip has been processed, generate new name of zip and upload it to s3 bucket + processedFileName := events[0] + toBeDeletedZips := []string{events[0]} + if isProcessed { + processedFileName = events[0] + utils.HYPHEN + utils.PROCESS_FILE_NAME_SUFFIX + + zipFilePath := filepath.Join(utils.TempDestinationFolder, processedFileName+fileTypeExtension) + switch fileTypeExtension { + case utils.ZipExtension.String(): + err := helper.CreateZipFile(zipFilePath, processedFilesPath, imageType, []string{}) + if err != nil { + log.Error("Error occurred while creating the zip file", zap.String("sessionId", sessionId), zap.String("fileName", processedFileName), zap.Error(err)) + return + } + case utils.ZipXzExtension.String(): + err = helper.CreateZipXzFile(zipFilePath, processedFilesPath, imageType, []string{}) + if err != nil { + metrics.MediaGenerationFailureCounter.WithLabelValues(fileTypeExtension) + log.Error("Error occurred while creating the zip.xz file", zap.String("sessionId", sessionId), zap.String("fileName", processedFileName), zap.Error(err)) + return + } + default: + log.Error("Unsupported file extension", zap.String("fileTypeExtension", fileTypeExtension)) + return + } + + uploadResponse, err := m.s3Client.UploadFile(sessionUploadBucket, utils.TempDestinationFolder, processedFileName+fileTypeExtension, processedFileName+fileTypeExtension) + if err != nil { + log.Error("failed to upload processed to s3", zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.String("fileName", processedFileName), 
zap.Error(err)) + return + } + log.Info(fmt.Sprintf("Processed file uploaded to s3 with response: %v", uploadResponse), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.String("fileName", processedFileName)) + toBeDeletedZips = append(toBeDeletedZips, processedFileName) + } + + // Generate video using the zips processed till now + err = m.generateVideoOfProcessedImagesAndUpload(processedFilesPath, uuidSessionId, sessionId, events[0], deviceId, videoUploadBucket, imageType, 0, snapshotPerSecond, toBeDeletedZips, fileTypeExtension) + if err != nil { + log.Error("Error occurred while generating video of processed zip", zap.String("sessionId", sessionId), zap.String("eventId", events[0]), zap.Error(err)) + return + } + + // Generate pre-signed URL of the video + downloadURL, err := m.s3Client.PresignedDownloadUrl(videoUploadBucket, + sessionId+utils.VideoExtension.String(), sessionId+utils.VideoExtension.String()) + if err != nil { + log.Error("generating Presigned download url failed", zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return + } + + // After uploading new processed zip to s3 , mark the fragment video generation status as COMPLETED + retryFunc := func() (interface{}, error) { + esStatusCode, err := m.videoGenerationStatusAccessLayer.UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, events[0], utils.COMPLETED, processedFileName, 0) + return esStatusCode, err + } + _, err = utils.RetryFunctionWithResponseAndError(retryFunc, config.NewCoreElasticSearchConfig().ElasticSearchUpdateMaxRetry, config.NewCoreElasticSearchConfig().ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + + if err != nil { + log.Error("Error while updating video generation status", zap.String("sessionId", sessionId), zap.String("eventId", events[0])) + } + + // Generate other event labels of session to be returned in the response + labels, _ := m.fetchLabelsForSession(sessionId, utils.EMPTY, 
eventIngestionIndex) + log.Info("video generation successful", zap.String("eventId", sessionId), + zap.String("deviceId", deviceId)) + genericResponse = utils.AddDataToResponse(response.VideoGenerationStatus{ + Link: downloadURL, + DeviceId: deviceId, + SessionId: sessionId, + Model: esResponse[0].Source.DeviceModel, + DeviceCarrierName: esResponse[0].Source.CarrierName, + AppVersionCode: esResponse[0].Source.AppVersionCode, + AppVersionName: esResponse[0].Source.AppVersionName, + DeviceOs: esResponse[0].Source.AppOS, + RecordStartingTime: esResponse[0].Source.ClientTs, + Labels: labels, + FragmentsCompletedTillNow: 1, + VideoGeneratedTillNow: 1, + TotalFragments: len(events), + }, http.StatusOK, genericResponse) + countForSuccessResponse++ + + // Once the video is generated, assign the process to the Next Fragment zips to a newer thread + if len(events) > 1 { + go func() { + err := m.generateNextFragment(sessionId, 1, events, sessionUploadBucket, videoGenerationStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType, fileTypeExtension) + if err != nil { + log.Error("Error generating video of next fragment", zap.String("sessionId", sessionId), zap.String("fragmentIndex", "1")) + } + }() + } + } else { + // Todo handle for live sessions + var videoGenerationStatusResponses []es.VideoGenerationStatusResponse + jsonHits, err := json.Marshal(result.Hits.Hits) + if err != nil { + log.Error("Error while marshalling ", zap.Error(err)) + return + } + if err = json.Unmarshal(jsonHits, &videoGenerationStatusResponses); err != nil { + log.Error("Error while unmarshalling ", zap.Error(err)) + return + } + labels, _ := m.fetchLabelsForSession(sessionId, utils.EMPTY, eventIngestionIndex) + /*indexName := esResponse[0].Index + appVersionCode := esResponse[0].Source.AppVersionCode*/ + + downloadURL, err := m.s3Client.PresignedDownloadUrl(videoUploadBucket, + sessionId+utils.VideoExtension.String(), sessionId+utils.VideoExtension.String()) + if err != nil { + 
log.Error("generating Presigned download url failed", zap.String("sessionId", sessionId), zap.Error(err)) + return + } + genericResponse = utils.AddDataToResponse(response.VideoGenerationStatus{ + Link: downloadURL, + DeviceId: deviceId, + SessionId: sessionId, + Model: esResponse[0].Source.DeviceModel, + DeviceCarrierName: esResponse[0].Source.CarrierName, + AppVersionCode: esResponse[0].Source.AppVersionCode, + AppVersionName: esResponse[0].Source.AppVersionName, + DeviceOs: esResponse[0].Source.AppOS, + RecordStartingTime: esResponse[0].Source.ClientTs, + Labels: labels, + FragmentsCompletedTillNow: videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow + 1, + TotalFragments: len(sessionResponse), + VideoGeneratedTillNow: videoGenerationStatusResponses[0].Source.VideoGeneratedTillNow + 1, + }, http.StatusOK, genericResponse) + + /*extraFragments := len(sessionResponse) - videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow -1 + + if extraFragments > 0 { + m.createVideoGenerationStatusEntity(sessionId, videoGenerationStatusIndex, requesterEmailId, events, videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow+1, videoGenerationStatusResponses[0].Source.VideoGeneratedTillNow, videoGenerationStatusResponses[0]) + go func() { + err := m.generateNextFragment(sessionId, videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow+1, events, sessionUploadBucket, videoGenerationStatusIndex, eventIngestionIndex, indexName, appVersionCode, client) + if err != nil { + log.Error("Error generating video of next fragment", zap.String("sessionId", sessionId), zap.String("fragmentIndex", "1")) + } + }() + }*/ + + c.JSON(http.StatusOK, utils.SuccessPaginatedResponse(genericResponse, common.Page{}, http.StatusOK)) + return + } + } + } + + if page.SortDirection == es.DESC { + utils.Sort(&genericResponse) + } else { + utils.SortAsc(&genericResponse) + } + + c.JSON(http.StatusMultiStatus, utils.SuccessPaginatedResponse(genericResponse, 
common.Page{ + PageSize: countForSuccessResponse, + TotalPages: int64(math.Ceil(float64(page.TotalSize) / float64(page.PageSize))), + TotalElements: page.TotalSize, + PageNumber: page.PageNumber, + }, http.StatusMultiStatus)) + +} + +// This flow is to save time for DsMasking. This function will do the preprocessing and upload the ds processed zip to s3 +//func (m *MediaHandler) asyncDSMasking(sessionId string, events []string, sessionUploadBucket, eventIngestionIndex, videoGenerationStatusIndex, clientName string, esResponse []es.SessionResponse) error { +// log.Info("Starting asyncDSMasking", zap.String("sessionId", sessionId)) +// for i := 1; i < len(events); i++ { +// indexName := esResponse[i].Index +// appVersionCode := esResponse[i].Source.AppVersionCode +// err := m.processNextZip(sessionId, i, events, sessionUploadBucket, videoGenerationStatusIndex, eventIngestionIndex, indexName, appVersionCode, clientName, true) +// if err != nil { +// log.Error("Error occurred while applying DS Masking", zap.String("sessionId", sessionId), zap.String("eventId", events[i]), zap.Error(err)) +// } +// } +// return nil +//} + +func (m *MediaHandler) FetchLatestVideo(c *gin.Context) { + + sessionId := c.Query(utils.SESSION_ID) + fragmentsTillNow := c.Query(utils.FRAGMENTS_TILL_NOW) + + client, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + generatedUuid := uuid.New() + uuidSessionId := sessionId + utils.HYPHEN + generatedUuid.String() + + videoGenerationStatusIndex := config.GetCoreConfig().ElasticSearchConfig.VideoGenerationStatusIndexClientMap[client] + sessionUploadBucket := config.GetCoreConfig().S3Config.SessionUploadBucketClientMap[client] + videoUploadBucket := config.GetCoreConfig().S3Config.VideoUploadBucketClientMap[client] + sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[client] + eventIngestionIndex := 
config.GetCoreConfig().ElasticSearchConfig.EventIngestionIndexClientMap[client] + + page := es.Page{ + PageSize: 10000, + PageNumber: 0, + SortDirection: es.DESC, + } + + sessionResponse, err := m.sessionsAccessLayer.FetchSessionsWithSessionIds([]string{sessionId}, &page, sessionUploadIndex) + if err != nil { + log.Error("could not find any session for given inputs", zap.String("sessionId", sessionId), zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{SessionId: sessionId})) + return + } + + indexName := sessionResponse[0].Index + appVersionCode := sessionResponse[0].Source.BaseAttributes.AppVersionCode + imageType := utils.DOT + sessionResponse[0].Source.BaseAttributes.ImageType + fileTypeExtension := sessionResponse[0].Source.BaseAttributes.FileTypeExtension + + eventBucketsPerSession, _, _, snapshotPerSecond := helper.CreateBucketsForSession(sessionResponse) + + videoGenerationStatus, err := m.videoGenerationStatusAccessLayer.FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex) + if err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, "Video generation status could not be fetched")) + return + } + result, err := mapper.MapESApiResponseToESResponse(videoGenerationStatus) + var videoGenerationStatusResponses []es.VideoGenerationStatusResponse + jsonHits, err := json.Marshal(result.Hits.Hits) + if err != nil { + log.Error("Error while marshalling ", zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, "Video generation status could not be fetched")) + return + } + if err = json.Unmarshal(jsonHits, &videoGenerationStatusResponses); err != nil { + log.Error("Error while unmarshalling ", zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, "Video generation status could not be fetched")) + 
return + } + + fragmentsInInteger := utils.GetIntFromString(fragmentsTillNow) + var genericResponse []common.Response + // Todo handle for live sessions + if videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow >= fragmentsInInteger { + processedTill := 0 + var toBeDeletedProcessedZips []string + pathToUnzippedFiles := utils.EMPTY + needsToBeProcessed := false + for _, fragmentStatus := range videoGenerationStatusResponses[0].Source.VideoFragmentStatuses { + if fragmentStatus.ZipProcessingStatus == utils.COMPLETED && fragmentStatus.ProcessedZipName != utils.EMPTY { + pathToUnzippedFiles, err = m.s3Client.DownloadAndUnzipFileWithExtension(sessionUploadBucket, + utils.TempDestinationFolder, fragmentStatus.ProcessedZipName+fileTypeExtension, fragmentStatus.ProcessedZipName+fileTypeExtension, uuidSessionId, fileTypeExtension) + if err != nil { + log.Error("Error occurred while downloading processed zip file", zap.String("sessionId", sessionId), zap.String("zip_name", fragmentStatus.ProcessedZipName)) + break + } + needsToBeProcessed = true + toBeDeletedProcessedZips = append(toBeDeletedProcessedZips, fragmentStatus.ProcessedZipName) + processedTill = processedTill + 1 + } else { + // Assuming video generation should not stop if the zip was not processed due to some reason + // break + // In else block if required we are trying to reprocess the zip which could have failed + log.Info("EventId is not processed yet", zap.String("eventId", fragmentStatus.EventID), zap.String("sessionId", sessionId)) + go func() { + for sessionId, events := range *eventBucketsPerSession { + log.Info("Trying to process the fragment again", zap.String("sessionId", sessionId), zap.String("eventId", events[processedTill]), zap.Error(err)) + err := m.processNextZip(sessionId, processedTill, events, sessionUploadBucket, videoGenerationStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType, false, fileTypeExtension) + if err != nil { + log.Error("Error occurred 
even after retrying to process the zip", zap.String("sessionId", sessionId), zap.String("eventId", events[processedTill]), zap.Error(err)) + } + } + }() + } + } + + if needsToBeProcessed { + err = m.generateVideoOfProcessedImagesAndUpload(pathToUnzippedFiles, uuidSessionId, sessionId, utils.EMPTY, utils.EMPTY, videoUploadBucket, imageType, videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow, snapshotPerSecond, toBeDeletedProcessedZips, fileTypeExtension) + retryFunc := func() (interface{}, error) { + status := utils.PENDING + if processedTill >= videoGenerationStatusResponses[0].Source.TotalFragments { + status = utils.COMPLETED + } + esStatusCode, err := m.videoGenerationStatusAccessLayer.UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status, processedTill-1) + return esStatusCode, err + } + _, err = utils.RetryFunctionWithResponseAndError(retryFunc, config.NewCoreElasticSearchConfig().ElasticSearchUpdateMaxRetry, config.NewCoreElasticSearchConfig().ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + if err != nil { + log.Error("Error while updating till when the video has been generated", zap.String("sessionId", sessionId), zap.Error(err)) + return + } + } + } + + // This is just a fallback in case due to some error, next fragments generation stops and client keeps sending same fragment count which was returned earlier + // If required we can put up a check to call only if last processed at timestamp was way before.Ideally this block should never get executed + if fragmentsInInteger == videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow && videoGenerationStatusResponses[0].Source.VideoGenerationStatus != utils.COMPLETED { + log.Info("Scheduling to generate next fragments ", zap.Int("fragmentIndex", videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow+1), zap.String("sessionId", sessionId)) + go func() { + for sessionId, events := range *eventBucketsPerSession { + err := 
m.generateNextFragment(sessionId, videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow+1, events, sessionUploadBucket, videoGenerationStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType, fileTypeExtension) + if err != nil { + return + } + } + }() + } + + downloadURL, err := m.s3Client.PresignedDownloadUrl(videoUploadBucket, + sessionId+utils.VideoExtension.String(), sessionId+utils.VideoExtension.String()) + if err != nil { + log.Error("generating Presigned download url failed", zap.String("sessionId", sessionId), zap.Error(err)) + return + } + + genericResponse = utils.AddDataToResponse(response.VideoGenerationStatus{ + LatestUrl: downloadURL, + FragmentsCompletedTillNow: videoGenerationStatusResponses[0].Source.FragmentsCompletedTillNow + 1, + TotalFragments: videoGenerationStatusResponses[0].Source.TotalFragments + 1, + VideoGeneratedTillNow: videoGenerationStatusResponses[0].Source.VideoGeneratedTillNow + 1, + SessionId: sessionId, + }, http.StatusOK, genericResponse) + + c.JSON(http.StatusOK, utils.SuccessPaginatedResponse(genericResponse, common.Page{}, http.StatusOK)) + +} + +func (m *MediaHandler) createVideoGenerationStatusEntity(sessionId, videoGenerationStatusIndex, requesterEmailId string, events []string, fragmentsCompleted int, videoGenerated int, videoStatuses es.VideoGenerationStatusResponse) { + var count = 0 + var videoFragmentStatuses []core.VideoFragmentStatusAttributes + + if len(videoStatuses.Source.VideoFragmentStatuses) > 0 { + for _, videoStatus := range videoStatuses.Source.VideoFragmentStatuses { + videoStatusData := core.VideoFragmentStatusAttributes{ + ZipProcessingStatus: videoStatus.ZipProcessingStatus, + ProcessedZipName: videoStatus.ProcessedZipName, + FragmentOrder: videoStatus.FragmentOrder, + EventId: videoStatus.EventID, + } + videoFragmentStatuses = append(videoFragmentStatuses, videoStatusData) + } + } + + for _, event := range events { + + if count < fragmentsCompleted { + count++ + 
continue + } + + fragmentStatus := core.VideoFragmentStatusAttributes{ + ZipProcessingStatus: utils.PENDING, + FragmentOrder: count, + EventId: event, + } + videoFragmentStatuses = append(videoFragmentStatuses, fragmentStatus) + count = count + 1 + } + if fragmentsCompleted-1 < 0 { + fragmentsCompleted++ + } + + // Check if a entry exists already before creating a new one + videoGenerationStatusModel := &core.VideoFragmentStatusModel{ + SessionId: sessionId, + FinalStatus: utils.PENDING, + VideoName: sessionId, + TotalFragments: int64(len(events)) - 1, + FragmentsCompletedTillNow: fragmentsCompleted - 1, + RequestedAt: utils.GetCurrentTimeInMillis(), + RequestedBy: requesterEmailId, + VideoFragmentStatusAttributes: videoFragmentStatuses, + VideoGeneratedTillNow: videoGenerated, + } + videoGenerationStatuses, err := json.Marshal(videoGenerationStatusModel) + if err != nil { + log.Error("error in serializing app events", zap.Error(err)) + return + } + err = m.videoGenerationStatusAccessLayer.CreateVideoGenerationStatus(*videoGenerationStatusModel, string(videoGenerationStatuses), videoGenerationStatusIndex) + if err != nil { + log.Error("error in updating video generation status", zap.Error(err)) + return + } +} + +func (m *MediaHandler) generateVideoOfProcessedZip(folderPath, uuidSessionId, sessionId, eventId, deviceId, videoUploadBucket, imageType string, fragmentIndex int, snapshotPerSecond int64, toBeDeletedZips []string) error { + log.Info("Generating video from images", zap.String("sessionId", sessionId)) + _, err := ffmpeg.GenerateVideoFromImages(folderPath, uuidSessionId, imageType, snapshotPerSecond) + if err != nil { + log.Error("generating video from images failed", zap.String("sessionId", sessionId), + zap.String("eventId", eventId), zap.Int("fragmentIndex", fragmentIndex), zap.String("folderPath", folderPath), zap.Error(err)) + metrics.MediaGenerationFailureCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + return err + } + 
metrics.MediaGenerationSuccessCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + log.Info("Video generation successful", zap.String("sessionId", sessionId), + zap.String("eventId", eventId), zap.Int("fragmentIndex", fragmentIndex), zap.Error(err)) + + err = m.uploadLocalFileToS3(uuidSessionId, sessionId, utils.EMPTY, deviceId, videoUploadBucket, folderPath) + if err != nil { + log.Error("generating video from images failed", zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return err + } + defer utils.DeleteFileFromLocal(uuidSessionId, &toBeDeletedZips) + return nil +} + +func (m *MediaHandler) generateVideoOfProcessedImagesAndUpload(folderPath, uuidSessionId, sessionId, eventId, deviceId, videoUploadBucket, imageType string, fragmentIndex int, snapshotPerSecond int64, toBeDeletedZips []string, fileTypeExtension string) error { + ctx := context.Background() + if !limiter.TryAcquire(ctx, limiter.VideoSem, limiter.DefaultTimeout) { + log.Error("Video processing queue full, try again later", + zap.String("sessionId", sessionId), + zap.Int("fragmentIndex", fragmentIndex)) + return errors.New("video processing queue is full, please try again later") + } + defer limiter.Release(limiter.VideoSem) + + log.Info("Generating video from images", zap.String("sessionId", sessionId)) + _, err := ffmpeg.GenerateVideoFromImages(folderPath, uuidSessionId, imageType, snapshotPerSecond) + if err != nil { + log.Error("generating video from images failed", zap.String("sessionId", sessionId), + zap.String("eventId", eventId), zap.Int("fragmentIndex", fragmentIndex), zap.String("folderPath", folderPath), zap.Error(err)) + metrics.MediaGenerationFailureCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + return err + } + metrics.MediaGenerationSuccessCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + log.Info("Video generation successful", zap.String("sessionId", sessionId), + zap.String("eventId", eventId), 
zap.Int("fragmentIndex", fragmentIndex), zap.Error(err)) + + err = m.uploadLocalFileToS3(uuidSessionId, sessionId, utils.EMPTY, deviceId, videoUploadBucket, folderPath) + if err != nil { + log.Error("generating video from images failed", zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return err + } + defer utils.DeleteFileFromLocalWithExtension(uuidSessionId, &toBeDeletedZips, fileTypeExtension) + return nil +} + +func (m *MediaHandler) generateNextFragment(sessionID string, fragmentIndex int, events []string, sessionUploadBucket, fragmentVideoStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType string, fileTypeExtension string) error { + + log.Info("Started generating next fragment", zap.String("sessionId", sessionID), zap.Int("fragmentIndex", fragmentIndex)) + + i := fragmentIndex + for i = fragmentIndex; i < len(events); i++ { + log.Info("Processing fragment", zap.String("sessionId", sessionID), zap.Int("fragmentIndex", i)) + err := m.processNextZip(sessionID, i, events, sessionUploadBucket, fragmentVideoStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType, false, fileTypeExtension) + if err != nil { + log.Error("Error generating video of fragment", zap.String("sessionID", sessionID), zap.Int("fragmentIndex", i), zap.Error(err)) + return err + } + } + + log.Info("Video generation complete, marking final status as COMPLETED", zap.String("sessionId", sessionID)) + + retryFunc := func() (interface{}, error) { + esStatusCode, err := m.videoGenerationStatusAccessLayer.UpdateVideoGenerationStatus(sessionID, fragmentVideoStatusIndex, utils.COMPLETED, 0) + return esStatusCode, err + } + _, err := utils.RetryFunctionWithResponseAndError(retryFunc, config.NewCoreElasticSearchConfig().ElasticSearchUpdateMaxRetry, config.NewCoreElasticSearchConfig().ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + + if err != nil { + log.Error("Error while updating final video generation 
status", zap.String("sessionId", sessionID), zap.Error(err)) + return err + } + return nil +} + +func (m *MediaHandler) processNextZip(sessionID string, fragmentIndex int, events []string, sessionUploadBucket, fragmentVideoStatusIndex, eventIngestionIndex, indexName, appVersionCode, client, imageType string, isAsyncDSMaskingEnabled bool, fileTypeExtension string) error { + ctx := context.Background() + if !limiter.TryAcquire(ctx, limiter.VideoSem, limiter.DefaultTimeout) { + log.Error("Video processing queue full, skipping fragment for now", + zap.String("sessionId", sessionID), + zap.Int("fragmentIndex", fragmentIndex)) + return errors.New("video processing queue is full, please try again later") + } + defer limiter.Release(limiter.VideoSem) + + event := events[fragmentIndex] + generatedUuid := uuid.New() + uuidSessionId := sessionID + utils.HYPHEN + generatedUuid.String() + pathToUnzippedFiles, err := m.s3Client.DownloadAndUnzipFileWithExtension(sessionUploadBucket, utils.TempDestinationFolder, event+fileTypeExtension, event+fileTypeExtension, uuidSessionId, fileTypeExtension) + if err != nil { + log.Error("Error occurred while downloading and unzipping file in processing next zip", zap.String("sessionId", sessionID), zap.String("eventId", event), zap.Error(err)) + return err + } + processedFilesPath, isProcessed, err := m.zipProcessor.ProcessZip(pathToUnzippedFiles, sessionID, eventIngestionIndex, indexName, client, appVersionCode, event, imageType, isAsyncDSMaskingEnabled) + if err != nil { + log.Error("Error occurred while processing zips", zap.String("sessionId", sessionID), zap.Error(err)) + return err + } + + processedFileName := event + toBeDeletedZips := []string{event} + defer utils.DeleteFileFromLocalWithExtension(uuidSessionId, &toBeDeletedZips, fileTypeExtension) + if isAsyncDSMaskingEnabled { + return nil + } + if isProcessed { + processedFileName = event + utils.HYPHEN + utils.PROCESS_FILE_NAME_SUFFIX + zipFilePath := 
filepath.Join(utils.TempDestinationFolder, processedFileName+fileTypeExtension) + switch fileTypeExtension { + case utils.ZipExtension.String(): + err := helper.CreateZipFile(zipFilePath, processedFilesPath, imageType, []string{}) + if err != nil { + log.Error("Error occurred while creating the zip file", zap.String("sessionId", sessionID), zap.String("fileName", processedFileName), zap.Error(err)) + return err + } + case utils.ZipXzExtension.String(): + err = helper.CreateZipXzFile(zipFilePath, processedFilesPath, imageType, []string{}) + if err != nil { + metrics.MediaGenerationFailureCounter.WithLabelValues(fileTypeExtension) + log.Error("Error occurred while creating the zip.xz file", zap.String("sessionId", sessionID), zap.String("fileName", processedFileName), zap.Error(err)) + return err + } + default: + log.Error("Unsupported file extension", zap.String("fileTypeExtension", fileTypeExtension)) + return err + } + + uploadResponse, err := m.s3Client.UploadFile(sessionUploadBucket, utils.TempDestinationFolder, processedFileName+fileTypeExtension, processedFileName+fileTypeExtension) + toBeDeletedZips = append(toBeDeletedZips, processedFileName) + if err != nil { + log.Error("failed to file uploaded to s3", zap.String("sessionId", sessionID), zap.Error(err)) + return err + } + log.Info(fmt.Sprintf("file uploaded to s3 with response: %v", uploadResponse), zap.String("sessionId", sessionID)) + } + if err != nil { + log.Error("Error occurred while processing zips", zap.String("sessionId", sessionID), zap.Error(err)) + return err + } + + retryFunc := func() (interface{}, error) { + esStatusCode, err := m.videoGenerationStatusAccessLayer.UpdateFragmentVideoGenerationStatus(sessionID, fragmentVideoStatusIndex, event, utils.COMPLETED, processedFileName, int64(fragmentIndex)) + return esStatusCode, err + } + _, err = utils.RetryFunctionWithResponseAndError(retryFunc, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateMaxRetry, 
config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + + // Update processed FileName + if err != nil { + log.Error("error updating video generation status", zap.Error(err)) + return err + } + return nil +} + +func (m *MediaHandler) FetchHistoricVideo(c *gin.Context) { + + eventIds := strings.Split(c.Query("event_ids"), utils.COMMA) + snapshotPerSecondString := c.Query("snapshot_per_second") + imageType := c.Query(utils.IMAGE_TYPE) + if imageType == utils.EMPTY { + imageType = utils.ImageTypeJpeg + } + imageType = utils.DOT + imageType + + snapshotPerSecond := utils.GetInt64FromString(snapshotPerSecondString) + + client, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + generatedUuid, err := uuid.NewUUID() + + sessionUploadBucket := config.GetCoreConfig().S3Config.SessionUploadBucketClientMap[client] + videoUploadBucket := config.GetCoreConfig().S3Config.VideoUploadBucketClientMap[client] + + if err != nil { + log.Error("uuid generation failed", zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + + videoLink, err := m.fetchVideoForSession(&eventIds, "historic-video-"+generatedUuid.String(), "historic-video-"+generatedUuid.String(), utils.EMPTY, utils.EMPTY, sessionUploadBucket, videoUploadBucket, utils.EMPTY, utils.EMPTY, snapshotPerSecond, utils.EMPTY, utils.EMPTY, imageType, false) + if err != nil { + log.Error("video generation failed", zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + + c.JSON(http.StatusOK, utils.SuccessResponse(map[string]string{ + "link": videoLink, + }, http.StatusOK)) +} + +func (m *MediaHandler) fetchVideoForSession(sessions *[]string, sessionId, overAllSessionId, customerId, deviceId string, sessionUploadBucket string, videoUploadBucket 
string, eventIngestionIndex string, videoGenerationIndex string, snapshotPerSecond int64, appName, indexName, imageType string, streamingEnabled bool) (string, error) { + err := m.generateVideo(sessions, sessionId, overAllSessionId, customerId, deviceId, sessionUploadBucket, videoUploadBucket, eventIngestionIndex, videoGenerationIndex, snapshotPerSecond, appName, indexName, imageType, streamingEnabled) + if err != nil { + log.Error("generating video failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return "", err + } + log.Info("generated video and uploaded to s3 successfully", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId)) + + //generating presigned download url for user + downloadURL, err := m.s3Client.PresignedDownloadUrl(videoUploadBucket, + sessionId+utils.VideoExtension.String(), sessionId+utils.VideoExtension.String()) + if err != nil { + log.Error("generating Presigned download url failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return "", err + } + return downloadURL, nil +} + +// generateVideo will do nothing if the video is already present on s3, otherwise it will generate video and +// upload on s3. 
+func (m *MediaHandler) generateVideo(sessions *[]string, sessionId, overAllSessionId, customerId, deviceId string, sessionUploadBucket string, videoUploadBucket string, eventIngestionIndex string, videoGenerationIndex string, snapshotPerSecond int64, appName, indexName, imageType string, streamingEnabled bool) error { + //check if video already present on s3 + videoPresent, err := m.s3Client.CheckIfPresent(videoUploadBucket, + sessionId+utils.VideoExtension.String()) + if err != nil { + log.Error("error fetching from the s3", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return err + } + if videoPresent { + log.Info("video found on s3", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId)) + return nil + } + + return m.generateVideoToS3(sessions, sessionId, overAllSessionId, customerId, deviceId, sessionUploadBucket, videoUploadBucket, eventIngestionIndex, videoGenerationIndex, snapshotPerSecond, appName, indexName, imageType, streamingEnabled) +} + +// generateVideoToS3 will generate the video and upload it to s3 bucket configured. 
+func (m *MediaHandler) generateVideoToS3(sessions *[]string, sessionId, overAllSessionId, customerId, deviceId, sessionUploadBucket, videoUploadBucket, eventIngestionIndex, videoGenerationIndex string, snapshotPerSecond int64, appName, indexName, imageType string, streamingEnabled bool) error { + + generatedUuid, _ := uuid.NewUUID() + uuidSessionId := sessionId + utils.HYPHEN + generatedUuid.String() + + defer utils.DeleteFileFromLocal(uuidSessionId, sessions) + + pathToUnzippedFiles := utils.EMPTY + var err error + + appNames := config.GetCoreConfig().ZipsWithEventsAppVersions + + if strings.Contains(appNames, appName) && appName != utils.EMPTY { + zipsList, err := m.eventsAccessLayer.FetchZipsFromSession(sessionId, eventIngestionIndex, indexName) + if err != nil { + return errors.New("no session found") + } + + for _, zip := range zipsList { + tempPathToUnzippedFiles, err := m.s3Client.DownloadAndUnzipFile(sessionUploadBucket, + utils.TempDestinationFolder, zip+utils.ZipExtension.String(), zip+utils.ZipExtension.String(), uuidSessionId) + if err != nil { + log.Error("downloading zip file failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + } + if tempPathToUnzippedFiles != utils.EMPTY { + pathToUnzippedFiles = tempPathToUnzippedFiles + } + } + + for _, eventId := range *sessions { + if utils.Contains(zipsList, eventId) == true { + continue + } + //download and unzip file + tempPathToUnzippedFiles, err := m.s3Client.DownloadAndUnzipFile(sessionUploadBucket, + utils.TempDestinationFolder, eventId+utils.ZipExtension.String(), eventId+utils.ZipExtension.String(), uuidSessionId) + if err != nil { + log.Error("downloading zip file failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + } + if tempPathToUnzippedFiles != utils.EMPTY { + pathToUnzippedFiles = tempPathToUnzippedFiles + } + } + + if 
pathToUnzippedFiles == utils.EMPTY { + return errors.New("no Session Found") + } + + imageFiles, err := filepath.Glob(filepath.Join(pathToUnzippedFiles, utils.ASTERISK+imageType)) + + sort.Slice(imageFiles, func(i, j int) bool { + timestampI, _ := helper.ExtractTimestampFromImage(imageFiles[i]) + timeStampJ, _ := helper.ExtractTimestampFromImage(imageFiles[j]) + return timestampI < timeStampJ + }) + } else { + for _, eventId := range *sessions { + //download and unzip file + pathToUnzippedFiles, err = m.s3Client.DownloadAndUnzipFile(sessionUploadBucket, + utils.TempDestinationFolder, eventId+utils.ZipExtension.String(), eventId+utils.ZipExtension.String(), uuidSessionId) + if err != nil { + log.Error("downloading zip file failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return err + } + } + } + + folderPath := pathToUnzippedFiles + + //generating video from images + _, err = ffmpeg.GenerateVideoFromImages(folderPath, uuidSessionId, imageType, snapshotPerSecond) + if err != nil { + log.Error("generating video from images failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + metrics.MediaGenerationFailureCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + return err + } + metrics.MediaGenerationSuccessCounter.WithLabelValues(utils.VideoExtension.String()).Inc() + log.Info("video file generated", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId)) + + videoFolderPath := filepath.Join(utils.TempDestinationFolder, uuidSessionId) + err = m.uploadLocalFileToS3(uuidSessionId, sessionId, customerId, deviceId, videoUploadBucket, videoFolderPath) + if err != nil { + log.Error("generating video from images failed", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), 
zap.Error(err)) + return err + } + log.Info("removed local files", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId)) + return nil +} + +func (m *MediaHandler) uploadLocalFileToS3(uuidSessionId, sessionId, customerId, deviceId, videoUploadBucket, folderPath string) error { + + uploadResponse, err := m.s3Client.UploadFile(videoUploadBucket, + folderPath, uuidSessionId+utils.VideoExtension.String(), sessionId+utils.VideoExtension.String()) + if err != nil { + log.Error("failed to file uploaded to s3", + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId), zap.Error(err)) + return err + } + log.Info(fmt.Sprintf("file uploaded to s3 with response: %v", uploadResponse), + zap.String("customerId", customerId), zap.String("sessionId", sessionId), + zap.String("deviceId", deviceId)) + return err +} + +func (m *MediaHandler) fetchLabelsForSession(sessionId, labelFilters string, eventIngestionIndex string) ([]string, *int64) { + var validLabels []string + if labelFilters == "" { + validLabels = []string{ingester.ERROR_LOG, ingester.CRASH_ANALYTICS_EVENT, ingester.ANR_EVENT} + } else { + validLabels = strings.Split(labelFilters, utils.COMMA) + } + res, touchCount, err := m.eventsAccessLayer.FetchEventsFromSessionId(sessionId, &es.Page{}, eventIngestionIndex, true) + if err != nil { + log.Error("No data for sessionId", zap.String("sessionId", sessionId), zap.Error(err)) + return nil, nil + } + var labels []string + for _, esResponse := range res { + esResponseLocal := esResponse + if helper.IsValidLabel(esResponseLocal.Source.EventAttributes.EventName, validLabels) { + labels = append(labels, string(esResponseLocal.Source.EventAttributes.EventType)) + } + } + + return labels, touchCount +} diff --git a/alfred/cmd/core/app/handler/touch_points_handler.go b/alfred/cmd/core/app/handler/touch_points_handler.go new file mode 100644 index 0000000..8d2d09d --- 
/dev/null +++ b/alfred/cmd/core/app/handler/touch_points_handler.go @@ -0,0 +1,103 @@ +package handler + +import ( + "alfred/cmd/core/app/helper" + "alfred/internal/clients" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/ffmpeg" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "fmt" + "go.uber.org/zap" + "path/filepath" + "sync" +) + +type TouchPointsHandler struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + videoGenerationStatusAccessLayer repositoryAccessLayer.VideoGenerationStatusAccessLayer + s3Client s3.S3Client +} + +func NewTouchPointsHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *TouchPointsHandler { + return &TouchPointsHandler{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + eventsAccessLayer: repositories.EventsAccessLayer, + s3Client: s3Client, + videoGenerationStatusAccessLayer: repositories.VideoGenerationStatusAccessLayer, + } +} + +func (m *TouchPointsHandler) ApplyTouchPoints(pathToUnzippedFiles, sessionId, imageType string, eventsFromSession []es.EventResponse) (string, bool, error) { + + imageFiles, err := filepath.Glob(filepath.Join(pathToUnzippedFiles, utils.ASTERISK+imageType)) + + var wg sync.WaitGroup + var mu sync.Mutex + var errorsList []error + + tempDir := utils.TempDirectory + tempDirPath := filepath.Join(pathToUnzippedFiles, tempDir) + err = utils.CreateDirectory(tempDirPath) + if err != nil { + log.Error("Touch-points could not be created as there was some issue creating directory", zap.String("sessionId", sessionId)) + return pathToUnzippedFiles, false, err + } + + screenShotTimeStampToEventTypeMap := helper.GetScreenShotNameWithTouchPointMap(eventsFromSession) + for _, imageFile := range imageFiles { + wg.Add(1) + go func(imageFile string) { + defer wg.Done() + + imageTimestamp, err := 
helper.ExtractTimestampFromImage(imageFile) + if err != nil { + mu.Lock() + errorsList = append(errorsList, err) + mu.Unlock() + return + } + + destFile := filepath.Join(tempDirPath, filepath.Base(imageFile)) + if value, isPresent := screenShotTimeStampToEventTypeMap[imageTimestamp]; isPresent { + // if the value is present in the map then process the image + if isPresent && value.EventType == ingester.TOUCH_EVENT { + err = ffmpeg.ApplyTouchPoints(imageFile, destFile, value.XCoordinate, value.YCoordinate, tempDirPath, imageType) + } + // Todo :: Add support later for scroll animation + if err != nil { + mu.Lock() + log.Error("Error while applying touchpoint", zap.String("sessionId", sessionId), zap.Int64("imageTimeStamp", imageTimestamp)) + errorsList = append(errorsList, err) + mu.Unlock() + return + } + } else { + // Add the image simply to path + err := utils.CopyFile(imageFile, destFile) + if err != nil { + mu.Lock() + errorsList = append(errorsList, err) + mu.Unlock() + return + } + } + }(imageFile) + } + wg.Wait() + + if len(errorsList) > 0 { + errMsg := "Errors occurred during processing of touch-points" + for _, err := range errorsList { + errMsg += fmt.Sprintf("- %v\n", err) + } + return pathToUnzippedFiles, false, errors.New(errMsg) + } + + return tempDirPath, true, nil +} diff --git a/alfred/cmd/core/app/handler/web_session_handler.go b/alfred/cmd/core/app/handler/web_session_handler.go new file mode 100644 index 0000000..fc92769 --- /dev/null +++ b/alfred/cmd/core/app/handler/web_session_handler.go @@ -0,0 +1,362 @@ +package handler + +import ( + "alfred/api/request" + "alfred/api/response" + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/internal/clients" + "alfred/model/common" + "alfred/model/core" + "alfred/model/es" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "fmt" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "io/ioutil" + "math" + 
"net/http" + "os" + "path/filepath" + "sort" + "sync" + "time" +) + +type WebSessionHandler struct { + webSessionAccessLayer repositoryAccessLayer.WebSessionsAccessLayer + s3Client s3.S3Client + customerService *external.CustomerService +} + +func NewWebSessionHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *WebSessionHandler { + return &WebSessionHandler{ + webSessionAccessLayer: repositories.WebSessionsAccessLayer, + s3Client: s3Client, + customerService: external.NewCustomerService(httpClient), + } +} + +func (s *WebSessionHandler) FetchWebSessions(c *gin.Context) { + clientName, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + log.Error(utils.INVALID_WEB_CLIENT, zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + + } + sessionId := c.Param("id") + webSessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.WebSessionUploadIndexClientMap[clientName] + log.Info("Fetch Web Session", zap.String("sessionId", sessionId), zap.String("email", c.GetHeader("X-User-Email")), + zap.String("sessionToken", c.GetHeader("X-Session-Token"))) + sessionResponse, err := s.webSessionAccessLayer.FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex) + + if err != nil { + log.Error("error while fetching session details", zap.String("sessionId", sessionId)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{ + SessionId: sessionId, + })) + return + } + + var genericResponse []common.Response + finalResponse := response.WebSessionResponseData{ + BaseAttributesDTO: sessionResponse[0].Source.WebBaseAttributes, + } + + if finalResponse.BaseAttributesDTO.Version >= config.GetCoreConfig().S3Config.MinWebVersionSupportingFolderUpload { + data, err := s.downloadAndProcessJsonFiles(sessionId, clientName) + if err != nil { + log.Error("error while downloading json 
session data from s3", zap.String("sessionId", sessionId)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{ + SessionId: sessionId, + })) + return + } + finalResponse.SessionAttributes.Data = data + + } else { + eventResponsesMap, err := s.downloadAndProcessWebZips(sessionId, clientName, sessionResponse) + if err != nil { + log.Error("error while downloading web zips session data from s3", zap.String("sessionId", sessionId)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, response.SessionErrorData{ + SessionId: sessionId, + })) + return + } + for _, session := range sessionResponse { + finalResponse.SessionAttributes.Data = append(finalResponse.SessionAttributes.Data, eventResponsesMap[session.Source.WebSessionAttributes.EventId]...) + } + } + + genericResponse = utils.AddDataToResponse(finalResponse, http.StatusOK, genericResponse) + c.JSON(http.StatusMultiStatus, utils.SuccessPaginatedResponse(genericResponse, common.Page{ + PageSize: len(genericResponse), + }, http.StatusMultiStatus)) +} + +func (s *WebSessionHandler) downloadAndProcessJsonFiles(sessionId, clientName string) ([]string, error) { + targetFolder := filepath.Join(utils.TempDestinationFolder, sessionId) + if !utils.FolderExists(targetFolder) { + _ = utils.CreateDirectory(targetFolder) + } + _, err := s.s3Client.DownloadAllFilesFromFolder(config.GetCoreConfig().S3Config.WebSessionBucketClientMap[clientName], sessionId, targetFolder) + + if err != nil { + return nil, err + } + + return s.processJSONFiles(targetFolder) + +} + +func (s *WebSessionHandler) processJSONFiles(targetFolder string) ([]string, error) { + // Read all files in the directory + files, err := ioutil.ReadDir(targetFolder) + if err != nil { + return nil, fmt.Errorf("failed to read directory: %w", err) + } + + // Slice to store parsed JSON data + var webEventsList []core.WebEventJsonModel + + // Parse each JSON 
file + for _, file := range files { + filePath := filepath.Join(targetFolder, file.Name()) + + // Read the file content + content, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file %s: %w", filePath, err) + } + + if (content == nil) || (len(content) == 0) { + continue + } + // Parse JSON + var webEvent core.WebEventJsonModel + err = json.Unmarshal(content, &webEvent) + if err != nil { + return nil, fmt.Errorf("failed to parse JSON in file %s: %w", filePath, err) + } + + webEventsList = append(webEventsList, webEvent) + } + + // Sort the files by event_timestamp + sort.Slice(webEventsList, func(event1, event2 int) bool { + return webEventsList[event1].EventTimestamp < webEventsList[event2].EventTimestamp + }) + + // Extract the dom data in sorted order + var sortedWebEventsDataList []string + for _, webEvent := range webEventsList { + sortedWebEventsDataList = append(sortedWebEventsDataList, webEvent.DomEventsData...) + } + + return sortedWebEventsDataList, nil +} + +func (s *WebSessionHandler) downloadAndProcessWebZips(sessionId, clientName string, sessionResponse []es.WebSessionResponse) (map[string][]string, error) { + + var waitGroupForAllS3Downloads sync.WaitGroup + var hasErrorMutex sync.Mutex + responsesMap := make(map[string][]string) + hasErrorGettingDataFromS3 := false + maxConcurrency := config.GetCoreConfig().MaxFetchWebVideoGofuncConcurrency + semaphore := make(chan struct{}, maxConcurrency) + + var err error + for _, session := range sessionResponse { + // Acquire a slot in the semaphore (blocks if the limit is reached) + semaphore <- struct{}{} + waitGroupForAllS3Downloads.Add(1) + sessionLocal := session + go func(eventId string) { + defer func() { + // Release the slot in the semaphore when the goroutine exits + <-semaphore + waitGroupForAllS3Downloads.Done() + }() + data, err := s.getWebSessionDataFromS3(eventId, clientName) + if err != nil { + hasErrorMutex.Lock() + hasErrorGettingDataFromS3 = true + 
log.Error("error while fetching session data from s3", zap.String("sessionId", sessionId), zap.String("eventId", eventId)) + hasErrorMutex.Unlock() + return + } + responsesMap[eventId] = data + }(sessionLocal.Source.WebSessionAttributes.EventId) + } + waitGroupForAllS3Downloads.Wait() + close(semaphore) + + if hasErrorGettingDataFromS3 { + return nil, err + } + return responsesMap, nil +} + +func (s *WebSessionHandler) FetchAllWebSessions(c *gin.Context) { + clientName, err := helper.ValidateAPIKeyHeaders(c) + if err != nil { + log.Error(utils.INVALID_WEB_CLIENT, zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + agentId := c.Query("agent_id") + ticketId := c.Query("ticket_id") + sessionId := c.Query("session_id") + deviceId := c.Query("device_id") + emailId := c.Query("email_id") + phoneNumber := c.Query("phone_number") + customerId := c.Query("customer_id") + sortBy := helper.WebSortingMapper(c.Query("sort_by")) + startTimestamp, endTimestamp, err := utils.ValidateTimestampsForWeb(c.Query("start_time"), c.Query("end_time"), 15*time.Minute) + if err != nil { + log.Error("error in query parameters", zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + webSessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.WebSessionUploadIndexClientMap[clientName] + webSessionUploadIndexList := helper.CreateSearchIndex(webSessionUploadIndex, startTimestamp, endTimestamp) + pageSize, pageNumber, sortDirection, err := utils.ValidatePage(c.Query("page_size"), c.Query("page_number"), c.Query("sort_direction")) + if err != nil { + log.Error("error in query parameters", zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + page := es.Page{ + PageSize: pageSize, + PageNumber: pageNumber, + SortDirection: es.SortDirection(sortDirection), + } + + var deviceIds []string + + if deviceId 
!= utils.EMPTY { + deviceIds = []string{deviceId} + } + + deviceIdsLocal, err := s.customerService.GetDeviceIds(phoneNumber, customerId, deviceIds) + + var clientProjectNameMap = config.GetCoreConfig().ClientProjectNameMap + + var webSessionResponse []es.WebSessionResponse + clientsWithoutDurationResponse := config.GetCoreConfig().ClientsWithoutDurationResponse + if utils.Contains(clientsWithoutDurationResponse, clientName) { + webSessionResponse, err = s.webSessionAccessLayer.FetchAllWebSession(request.WebSessionFilters{ + StartTimestamp: startTimestamp, + EndTimestamp: endTimestamp, + ProjectName: clientProjectNameMap[clientName], + SessionId: sessionId, + DeviceId: deviceIdsLocal, + TicketId: ticketId, + AgentId: agentId, + EmailId: emailId, + SortBy: sortBy, + }, &page, webSessionUploadIndexList) + } else { + webSessionResponse, err = s.webSessionAccessLayer.FetchAllWebSessionWithDuration(request.WebSessionFilters{ + StartTimestamp: startTimestamp, + EndTimestamp: endTimestamp, + ProjectName: clientName, + SessionId: sessionId, + DeviceId: deviceIdsLocal, + TicketId: ticketId, + AgentId: agentId, + EmailId: emailId, + SortBy: sortBy, + }, &page, webSessionUploadIndex, webSessionUploadIndexList) + } + + if err != nil { + log.Error("error while fetching web session details", zap.Error(err)) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + var externalInternalCustomerIdMap = make(map[string]string) + + var mappedSessionResponseData []es.WebSessionResponse + for _, session := range webSessionResponse { + sessionLocal := session + value, found := sessionLocal.Source.WebBaseAttributes.Metadata[utils.CUSTOMER_ID] + if found { + externalCustomerId := value.(string) + if _, exists := externalInternalCustomerIdMap[externalCustomerId]; !exists { + externalInternalCustomerIdMap[externalCustomerId] = s.customerService.GetCustomerRefId(externalCustomerId) + } + internalCustomerId := 
externalInternalCustomerIdMap[externalCustomerId] + sessionLocal.Source.WebBaseAttributes.Metadata[utils.CUSTOMER_ID] = internalCustomerId + } + + mappedSessionResponseData = append(mappedSessionResponseData, sessionLocal) + } + + if customerId != utils.EMPTY { + mappedSessionResponseData = s.filterWebSessionByCustomerId(mappedSessionResponseData, customerId) + } + + var genericResponse []common.Response + for _, session := range mappedSessionResponseData { + genericResponse = utils.AddDataToResponse(response.WebSessionResponseData{ + BaseAttributesDTO: session.Source.WebBaseAttributes, + DurationInMillis: session.WebSessionDurationInMillis, + }, http.StatusOK, genericResponse) + } + + c.JSON(http.StatusMultiStatus, utils.SuccessPaginatedResponse(genericResponse, common.Page{ + PageSize: len(genericResponse), + TotalPages: int64(math.Ceil(float64(page.TotalSize) / float64(page.PageSize))), + PageNumber: pageNumber, + TotalElements: page.TotalSize, + }, http.StatusMultiStatus)) +} + +func (s *WebSessionHandler) getWebSessionDataFromS3(eventId, clientName string) ([]string, error) { + fileName := eventId + utils.GZExtension.String() + err := s.s3Client.DownloadFile(config.GetCoreConfig().S3Config.WebSessionBucketClientMap[clientName], utils.TempDestinationFolder, fileName, fileName) + if err != nil { + return nil, err + } + data, err := s3.ReadFile(filepath.Join(utils.TempDestinationFolder, eventId), utils.GZExtension.String()) + if err != nil { + return nil, err + } + var dataList []string + err = json.Unmarshal(data, &dataList) + if err != nil { + return nil, err + } + defer s.deleteWebFileFromLocal(filepath.Join(utils.TempDestinationFolder, fileName)) + return dataList, nil +} + +func (s *WebSessionHandler) deleteWebFileFromLocal(fileName string) { + err := os.Remove(fileName) + if err != nil { + log.Error(fmt.Sprintf("not able to delete the file %s", fileName), zap.Error(err)) + } +} + +func (s *WebSessionHandler) filterWebSessionByCustomerId(sessions 
[]es.WebSessionResponse, customerId string) []es.WebSessionResponse { + var filteredSessions []es.WebSessionResponse + for _, session := range sessions { + if session.Source.WebBaseAttributes.Metadata[customerId] == customerId { + filteredSessions = append(filteredSessions, session) + } + } + if len(filteredSessions) == 0 { + return sessions + } + return filteredSessions +} diff --git a/alfred/cmd/core/app/handler/zip_processor.go b/alfred/cmd/core/app/handler/zip_processor.go new file mode 100644 index 0000000..8fa1d94 --- /dev/null +++ b/alfred/cmd/core/app/handler/zip_processor.go @@ -0,0 +1,70 @@ +package handler + +import ( + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/service" + "alfred/config" + "alfred/internal/clients" + "alfred/model/es" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/utils" + "go.uber.org/zap" +) + +type ZipProcessor struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + videoGenerationStatusAccessLayer repositoryAccessLayer.VideoGenerationStatusAccessLayer + s3Client s3.S3Client + customerService *external.CustomerService + maskingService service.MaskingService + touchPointsHandler *TouchPointsHandler +} + +func NewZipProcessor(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *ZipProcessor { + return &ZipProcessor{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + eventsAccessLayer: repositories.EventsAccessLayer, + s3Client: s3Client, + customerService: external.NewCustomerService(httpClient), + maskingService: service.NewMaskingServiceImpl(repositories, s3Client, httpClient), + touchPointsHandler: NewTouchPointsHandler(repositories, s3Client, httpClient), + videoGenerationStatusAccessLayer: repositories.VideoGenerationStatusAccessLayer, + } +} + +func (m *ZipProcessor) ProcessZip(pathToUnzippedFiles, sessionId, eventIngestionIndex, indexName, 
clientName, appVersion, zipName, imageType string, isAsyncDSMaskingEnabled bool) (string, bool, error) { + eventsFromSession, _, err := m.eventsAccessLayer.FetchEventsFromSession(sessionId, &es.Page{}, eventIngestionIndex, indexName) + if err != nil { + log.Error("no events were captured while processing zips", zap.String("sessionId", sessionId), zap.Bool("isAsyncDSMaskingEnabled", isAsyncDSMaskingEnabled), zap.Error(err)) + return pathToUnzippedFiles, false, err + } + + maskedImagesPath, isMasked := pathToUnzippedFiles, false + minAppVersion := config.GetCoreConfig().MaskingConfig.MinAppVersionCodeClientMap[clientName] + if config.GetCoreConfig().MaskingConfig.MaskingEnabled && utils.GetIntFromString(appVersion) >= utils.GetIntFromString(minAppVersion) { + maskedImagesPath, isMasked, err = m.maskingService.MaskImages(pathToUnzippedFiles, eventsFromSession, zipName, clientName, imageType, isAsyncDSMaskingEnabled) + if err != nil { + log.Error("Error occurred while applying masking but marking error as nil", zap.String("sessionId", sessionId), zap.Bool("isAsyncDSMaskingEnabled", isAsyncDSMaskingEnabled), zap.Error(err)) + } + } + if isAsyncDSMaskingEnabled { + return maskedImagesPath, isMasked, nil + } + imageWithTouchPointsPath, isTouchPointApplied := maskedImagesPath, false + minAppVersion = config.GetCoreConfig().TouchPointsConfig.MinAppVersionCodeClientMap[clientName] + if config.GetCoreConfig().TouchPointsConfig.Enabled && utils.GetIntFromString(appVersion) >= utils.GetIntFromString(minAppVersion) { + imageWithTouchPointsPath, isTouchPointApplied, err = m.touchPointsHandler.ApplyTouchPoints(maskedImagesPath, sessionId, imageType, eventsFromSession) + if err != nil { + log.Error("Error occurred while applying touch-points but marking error as nil", zap.String("sessionId", sessionId), zap.Error(err)) + return pathToUnzippedFiles, false, nil + } + } + isProcessed := false + if isMasked || isTouchPointApplied { + isProcessed = true + } + return 
imageWithTouchPointsPath, isProcessed, nil +} diff --git a/alfred/cmd/core/app/helper/common_utils.go b/alfred/cmd/core/app/helper/common_utils.go new file mode 100644 index 0000000..e0a3751 --- /dev/null +++ b/alfred/cmd/core/app/helper/common_utils.go @@ -0,0 +1,216 @@ +package helper + +import ( + "alfred/model/core" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/utils" + "fmt" + "github.com/u2takey/go-utils/slice" + "go.uber.org/zap" + "path/filepath" + "strconv" + "strings" + "time" +) + +func GetUniqueSessionsFromEventResponse(eventResponse []es.EventResponse) ([]string, map[string]es.EventResponse) { + uniqueIdsMap := make(map[string]bool) + idToEventMap := make(map[string]es.EventResponse) + var uniqueIds []string + for _, entry := range eventResponse { + sessionId := entry.Source.BaseAttributes.SessionId + if _, value := uniqueIdsMap[sessionId]; !value { + uniqueIdsMap[sessionId] = true + uniqueIds = append(uniqueIds, sessionId) + idToEventMap[sessionId] = entry + } + } + return uniqueIds, idToEventMap +} + +func GetUniqueSessionsFromSessionResponse(sessionResponse []es.SessionResponse) ([]string, map[string]es.SessionResponse) { + uniqueIdsMap := make(map[string]bool) + idToSessionMap := make(map[string]es.SessionResponse) + var uniqueIds []string + for _, entry := range sessionResponse { + sessionId := entry.Source.BaseAttributes.SessionId + if _, value := uniqueIdsMap[sessionId]; !value { + uniqueIdsMap[sessionId] = true + uniqueIds = append(uniqueIds, sessionId) + idToSessionMap[sessionId] = entry + } + } + return uniqueIds, idToSessionMap +} + +func GetScreenShotNameWithTouchPointMap(eventResponse []es.EventResponse) map[int64]core.EventsAndCoordinatesMapping { + screenShotTimeStampToEventTypeMap := make(map[int64]core.EventsAndCoordinatesMapping) + for _, entry := range eventResponse { + if ingester.TOUCH_EVENT == entry.Source.EventAttributes.EventType && entry.Source.ScreenshotTime > 0 { + xCoordinate, okX := 
entry.Source.EventAttributes.Attributes[utils.START_X].(string) + yCoordinate, OkY := entry.Source.EventAttributes.Attributes[utils.START_Y].(string) + if okX && OkY { + xCoordinateInt := utils.GetIntFromString(xCoordinate) + yCoordinateInt := utils.GetIntFromString(yCoordinate) + if xCoordinateInt > 0 && yCoordinateInt > 0 { + eventAttribute := core.EventsAndCoordinatesMapping{ + XCoordinate: xCoordinateInt, + YCoordinate: yCoordinateInt, + EventType: entry.Source.EventType, + } + screenShotTimestamp := entry.Source.EventAttributes.ScreenshotTime + screenShotTimeStampToEventTypeMap[screenShotTimestamp] = eventAttribute + } + } + } + } + return screenShotTimeStampToEventTypeMap +} + +func contains(screens []string, screenName string) bool { + for _, screen := range screens { + if screen == screenName { + return true + } + } + return false +} + +func GetKeysOfMap(hashmap map[string]string) []string { + var keys []string + for key := range hashmap { + keys = append(keys, key) + } + return keys +} + +func IsValidLabel(eventType string, filterLabel []string) bool { + return slice.ContainsString(filterLabel, eventType, nil) +} + +func CreateBucketsForSession(sessions []es.SessionResponse) (*map[string][]string, *map[string]string, *map[string][]es.SessionResponse, int64) { + eventBuckets := make(map[string][]string) + sessionToDevice := make(map[string]string) + sessionToEsResponse := make(map[string][]es.SessionResponse) + snapshotPerSecond := int64(utils.DEFAULT_RECORDING_FPS) + + for _, session := range sessions { + sessionId := session.Source.BaseAttributes.SessionId + sessionToDevice[sessionId] = session.Source.BaseAttributes.DeviceId + eventIds, ok := eventBuckets[sessionId] + if session.Source.BaseAttributes.SnapshotPerSecond != 0 { + snapshotPerSecond = session.Source.BaseAttributes.SnapshotPerSecond + } + if !ok { + eventBuckets[sessionId] = []string{session.Source.SessionUploadEventAttributes.EventId} + sessionToEsResponse[sessionId] = 
[]es.SessionResponse{session} + } else { + eventBuckets[sessionId] = append(eventIds, session.Source.SessionUploadEventAttributes.EventId) + sessionToEsResponse[sessionId] = append(sessionToEsResponse[sessionId], session) + } + } + return &eventBuckets, &sessionToDevice, &sessionToEsResponse, snapshotPerSecond +} + +func CreateSessionResponseFromEventResponse(eventResponse es.EventResponse) es.SessionResponse { + eventResponse.Source.BaseAttributes.HasErrors = true + return es.SessionResponse{ + Source: struct { + ingester.BaseAttributes `json:"base_attributes"` + ingester.SessionUploadEventAttributes `json:"session_upload_event_attributes"` + CreatedAt int64 `json:"created_at"` + }(struct { + ingester.BaseAttributes + ingester.SessionUploadEventAttributes + CreatedAt int64 + }{ + BaseAttributes: eventResponse.Source.BaseAttributes, + }), + } +} + +func GetUniqueSessionsFromSessionUpload(sessionResponse []es.SessionResponse) []string { + uniqueIdsMap := make(map[string]bool) + var uniqueIds []string + for _, entry := range sessionResponse { + sessionId := entry.Source.BaseAttributes.SessionId + if _, value := uniqueIdsMap[sessionId]; !value { + uniqueIdsMap[sessionId] = true + uniqueIds = append(uniqueIds, sessionId) + } + } + return uniqueIds +} + +func ExtractTimestampFromImage(imageFile string) (int64, error) { + fileName := filepath.Base(imageFile) + timestampStr := strings.TrimSuffix(fileName, filepath.Ext(fileName)) + + parsedTimestamp, err := strconv.Atoi(timestampStr) + if err != nil { + return 0, err + } + return int64(parsedTimestamp), nil +} + +func GenerateIndexNamesWithTimeRanges(startTimestamp, endTimestamp int64, index string) []string { + istLocation, err := time.LoadLocation(utils.IST_TIME_ZONE) + + if err != nil { + log.Error("Error loading IST time zone", zap.Error(err)) + return nil + } + + startTime := time.Unix(0, startTimestamp*int64(time.Millisecond)).In(istLocation) + endTime := time.Unix(0, 
endTimestamp*int64(time.Millisecond)).In(istLocation) + + var indexes []string + currentTime := startTime + for currentTime.Before(endTime) || currentTime.Equal(endTime) { + year, month, day := currentTime.Date() + dateStr := fmt.Sprintf("-%d-%d-%d", year, month, day) + index := fmt.Sprintf("%s%s", index, dateStr) + indexes = append(indexes, index) + currentTime = currentTime.Add(24 * time.Hour) + } + indexesStr := strings.Join(indexes, ", ") + log.Info("Searching on indexes", zap.String("indexes", indexesStr)) + return indexes +} + +func CreateSearchIndex(index string, startTimestamp, endTimestamp int64) []string { + var searchIndex []string + if startTimestamp > 0 && endTimestamp > 0 { + indexes := GenerateIndexNamesWithTimeRanges(startTimestamp, endTimestamp, index) + searchIndex = indexes + } else { + searchIndex = []string{index + "*"} + } + return searchIndex +} + +func WebSortingMapper(sortBy string) string { + switch sortBy { + case "recordedOn": + return "base_attributes.client_timestamp" + // Add more cases for additional sorting options here + // case "otherSortingOption": + // return "corresponding_database_column" + default: + return "base_attributes.client_timestamp" // Default case + } +} + +func AppSortingMapper(sortBy string) string { + switch sortBy { + case "recordedOn": + return "base_attributes.client_ts" + // Add more cases for additional sorting options here + // case "otherSortingOption": + // return "corresponding_database_column" + default: + return "created_at" // Default case + } +} diff --git a/alfred/cmd/core/app/helper/response_mapper.go b/alfred/cmd/core/app/helper/response_mapper.go new file mode 100644 index 0000000..46a1fd2 --- /dev/null +++ b/alfred/cmd/core/app/helper/response_mapper.go @@ -0,0 +1,56 @@ +package helper + +import ( + "alfred/api/response" + "alfred/model/es" + "alfred/model/ingester" +) + +func MapToFilterData(values []string) []response.FilterData { + var res []response.FilterData + for _, value := range values 
{ + valueLocal := value + res = append(res, response.FilterData{ + Label: valueLocal, + Value: valueLocal, + }) + } + return res +} + +func CreateSessionResponse(session es.SessionResponse, customerId string) response.SearchSessionResponseData { + return response.SearchSessionResponseData{ + CreatedAt: session.Source.CreatedAt, + DeviceAttributes: []ingester.DeviceAttributes{ + session.Source.SessionUploadEventAttributes.BeginningDeviceAttributes, + session.Source.SessionUploadEventAttributes.EndDeviceAttributes, + }, + BaseAttributesDTO: response.BaseAttributesDTO{ + AppVersionCode: session.Source.AppVersionCode, + AppVersionName: session.Source.AppVersionName, + DeviceId: session.Source.DeviceId, + DeviceModel: session.Source.DeviceModel, + DeviceManufacturer: session.Source.DeviceManufacturer, + ScreenResolution: session.Source.ScreenResolution, + AppOS: session.Source.AppOS, + OsVersion: session.Source.OsVersion, + Latitude: session.Source.Latitude, + Longitude: session.Source.Longitude, + NetworkType: session.Source.NetworkType, + CustomerId: customerId, + UpTime: session.Source.UpTime, + CarrierName: session.Source.CarrierName, + Metadata: session.Source.Metadata, + SessionId: session.Source.SessionId, + ParentSessionId: session.Source.ParentSessionId, + StartTimestamp: session.Source.SessionTimeStamp, + EndTimestamp: session.Source.ClientTs, + SnapshotPerSecond: session.Source.SnapshotPerSecond, + HasErrors: session.Source.HasErrors, + ImageType: session.Source.ImageType, + }, + Metadata: response.VideoMetadata{ + Duration: session.SessionDuration, + }, + } +} diff --git a/alfred/cmd/core/app/helper/screenshot_event_helper.go b/alfred/cmd/core/app/helper/screenshot_event_helper.go new file mode 100644 index 0000000..460d189 --- /dev/null +++ b/alfred/cmd/core/app/helper/screenshot_event_helper.go @@ -0,0 +1,62 @@ +package helper + +import ( + "alfred/config" + "alfred/model/es" + "alfred/utils" + "path/filepath" + "sort" + "strconv" +) + +func 
GetScreenshotsToBeMaskedByScreens(eventResponse []es.EventResponse, folderPathName, imageType string, screens []string) (map[string][]string, map[string][]string) { + imageFiles, _ := filepath.Glob(filepath.Join(folderPathName, utils.ASTERISK+imageType)) + + var screenshotTimes []int64 + + for _, image := range imageFiles { + imageTimestamp, _ := ExtractTimestampFromImage(image) + screenshotTimes = append(screenshotTimes, imageTimestamp) + } + + sort.Slice(screenshotTimes, func(i, j int) bool { + return screenshotTimes[i] < screenshotTimes[j] + }) + + screenshotPathUsed := make(map[string][]string) + screenshotNameUsed := make(map[string][]string) + + for eventIterator := 0; eventIterator < len(eventResponse); eventIterator++ { + eventResp := eventResponse[eventIterator] + var startTime, endTime int64 + if contains(screens, eventResp.Source.ScreenName) { + startTime = eventResp.Source.EventAttributes.EventTimestamp - config.GetCoreConfig().MaskingConfig.BufferTimeForMaskInMillis + for innerEventIterator, innerEvent := range eventResponse[eventIterator:] { + if eventResp.Source.ScreenName != innerEvent.Source.ScreenName { + endTime = innerEvent.Source.EventAttributes.EventTimestamp + eventIterator = eventIterator + innerEventIterator - 1 + break + } + if eventIterator+innerEventIterator == len(eventResponse)-1 { + endTime = innerEvent.Source.EventAttributes.EventTimestamp + eventIterator = eventIterator + innerEventIterator + break + } + } + if startTime > 0 && endTime > 0 { + for screenshotTimeIterator, screenshotTime := range screenshotTimes { + if screenshotTime >= startTime && screenshotTime <= endTime { + screenshot := strconv.FormatInt(screenshotTime, 10) + //check so that 2 screens are not added + screenshotTimes[screenshotTimeIterator] = 0 + fileName := screenshot + imageType + screenshotNameUsed[eventResp.Source.ScreenName] = append(screenshotNameUsed[eventResp.Source.ScreenName], fileName) + screenshotPathUsed[eventResp.Source.ScreenName] = 
append(screenshotPathUsed[eventResp.Source.ScreenName], filepath.Join(folderPathName, fileName)) + } + } + } + } + } + + return screenshotPathUsed, screenshotNameUsed +} diff --git a/alfred/cmd/core/app/helper/validation.go b/alfred/cmd/core/app/helper/validation.go new file mode 100644 index 0000000..f1a8457 --- /dev/null +++ b/alfred/cmd/core/app/helper/validation.go @@ -0,0 +1,23 @@ +package helper + +import ( + "alfred/config" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" +) + +func ValidateAPIKeyHeaders(c *gin.Context) (string, error) { + apiKey := c.Request.Header.Get(utils.X_API_KEY) + apiKeyClientMap := config.GetCoreConfig().ApiKeyClientMap + + value, found := apiKeyClientMap[apiKey] + if !found { + return utils.EMPTY, errors.New(utils.INVALID_CLIENT) + } + clientName, err := utils.GetStringValue(value) + if err != nil { + return utils.EMPTY, errors.New("client name could not be parsed") + } + return clientName, nil +} diff --git a/alfred/cmd/core/app/helper/zip_creation.go b/alfred/cmd/core/app/helper/zip_creation.go new file mode 100644 index 0000000..46ce17a --- /dev/null +++ b/alfred/cmd/core/app/helper/zip_creation.go @@ -0,0 +1,122 @@ +package helper + +import ( + "alfred/pkg/log" + "alfred/utils" + "archive/zip" + "bytes" + "github.com/ulikunitz/xz" + "go.uber.org/zap" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +func CreateZipXzFile(zipXZFilePath string, sourceDir, imagetype string, fileNameList []string) error { + // Step 1: Create an in-memory buffer for the .zip content + var zipBuffer bytes.Buffer + zipWriter := zip.NewWriter(&zipBuffer) + + files, err := ioutil.ReadDir(sourceDir) + if err != nil { + log.Error("Error occurred while reading source directory", zap.String("zipXZFilePath", zipXZFilePath), zap.String("sourceDirectory", sourceDir), zap.Error(err)) + return err + } + + // Add files to the zip archive + for _, file := range files { + if !file.IsDir() && strings.HasSuffix(file.Name(), imagetype) && 
(len(fileNameList) == 0 || utils.Contains(fileNameList, file.Name())) { + fileContents, err := ioutil.ReadFile(filepath.Join(sourceDir, file.Name())) + if err != nil { + log.Error("Error occurred while reading image files", zap.String("zipXZFilePath", zipXZFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + + zipFile, err := zipWriter.Create(file.Name()) + if err != nil { + log.Error("Error occurred while creating zip writer", zap.String("zipXZFilePath", zipXZFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + + _, err = zipFile.Write(fileContents) + if err != nil { + log.Error("Error occurred while adding file to zip", zap.String("zipXZFilePath", zipXZFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + } + } + + // Step 2: Close the zip writer to finalize the .zip data in the buffer + err = zipWriter.Close() + if err != nil { + log.Error("Error while finalizing zip writer", zap.String("zipXZFilePath", zipXZFilePath), zap.Error(err)) + return err + } + + // Step 3: Create the .zip.xz file and compress the .zip content + zipXZFile, err := os.Create(zipXZFilePath) + if err != nil { + log.Error("Error while creating .zip.xz file", zap.String("zipXZFilePath", zipXZFilePath), zap.Error(err)) + return err + } + defer zipXZFile.Close() + + // Step 4: Compress the .zip buffer with xz compression + xzWriter, err := xz.NewWriter(zipXZFile) + if err != nil { + log.Error("Error creating xz writer", zap.String("zipXZFilePath", zipXZFilePath), zap.Error(err)) + return err + } + defer xzWriter.Close() + + // Write the .zip buffer into the .xz writer to compress it + _, err = io.Copy(xzWriter, &zipBuffer) + if err != nil { + log.Error("Error writing .zip data to .zip.xz file", zap.String("zipXZFilePath", zipXZFilePath), zap.Error(err)) + return err + } + + return 
nil +} + +func CreateZipFile(zipFilePath string, sourceDir, imagetype string, fileNameList []string) error { + zipFile, err := os.Create(zipFilePath) + if err != nil { + return err + } + defer zipFile.Close() + + zipWriter := zip.NewWriter(zipFile) + defer zipWriter.Close() + + files, err := ioutil.ReadDir(sourceDir) + if err != nil { + log.Error("Error occurred while reading source directory", zap.String("zipFilePath", zipFilePath), zap.String("sourceDirectory", sourceDir), zap.Error(err)) + return err + } + + for _, file := range files { + if !file.IsDir() && strings.HasSuffix(file.Name(), imagetype) && (len(fileNameList) == 0 || utils.Contains(fileNameList, file.Name())) { + fileContents, err := ioutil.ReadFile(filepath.Join(sourceDir, file.Name())) + if err != nil { + log.Error("Error occurred while reading image files", zap.String("zipFilePath", zipFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + + zipFile, err := zipWriter.Create(file.Name()) + if err != nil { + log.Error("Error occurred while creating zip writer", zap.String("zipFilePath", zipFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + + _, err = zipFile.Write(fileContents) + if err != nil { + log.Error("Error occurred while adding file to zip", zap.String("zipFilePath", zipFilePath), zap.String("sourceDirectory", sourceDir), zap.String("fileName", file.Name()), zap.Error(err)) + return err + } + } + } + return nil +} diff --git a/alfred/cmd/core/app/masking/blur_mode_strategy.go b/alfred/cmd/core/app/masking/blur_mode_strategy.go new file mode 100644 index 0000000..f1c7e96 --- /dev/null +++ b/alfred/cmd/core/app/masking/blur_mode_strategy.go @@ -0,0 +1,101 @@ +package masking + +import ( + "alfred/config" + "alfred/pkg/ffmpeg" + "alfred/pkg/log" + "alfred/utils" + "errors" + "fmt" + "go.uber.org/zap" + "sync" +) + +type BlurMode struct { +} + +func (b 
*BlurMode) Mask(screenshotPathList, screenshotNameList []string, pathToUnzippedFiles, screen, zipName, imageType string) (bool, error) { + var height, width int + var err error + + for _, screenshot := range screenshotPathList { + height, width, err = ffmpeg.ExtractImageDimensions(screenshot) + if err != nil { + return false, err + } + break + } + + _, err = extractAndModifyImageGivenScreenshots(pathToUnzippedFiles, height, width, screenshotPathList, config.GetCoreConfig().MaskingConfig.MaskedScreensBlurScreenRatioMap, screen, imageType) + if err != nil { + return false, err + } + return true, nil +} + +func extractAndModifyImageGivenScreenshots(pathToUnzippedFiles string, height, width int, screenshotPathList []string, screenNameBlurRatioMap map[string]string, screen, imageType string) (string, error) { + + var wg sync.WaitGroup + var mu sync.Mutex + var errorsList []error + + maxConcurrency := config.GetCoreConfig().MaxBlurringVideoGofuncConcurrency + semaphore := make(chan struct{}, maxConcurrency) + + for _, screenshot := range screenshotPathList { + semaphore <- struct{}{} + wg.Add(1) + go func(screenshot string) { + + defer func() { + <-semaphore + wg.Done() + }() + + var blurScreenFactor string + + _, found := screenNameBlurRatioMap[screen] + if found { + blurScreenFactor = screenNameBlurRatioMap[screen] + } else { + blurScreenFactor = config.GetCoreConfig().MaskingConfig.DefaultBlurScreenRatio + } + + if blurScreenFactor == "1" { + var err error + if !utils.Contains(config.GetCoreConfig().MaskingConfig.MaskingFallBackScreens, screen) { + err = ffmpeg.ApplyBlurToRegion(screenshot, screenshot, height, width, blurScreenFactor, config.GetCoreConfig().MaskingConfig.DefaultBlurScreenStrength) + } else { + err = errors.New("invalid Third Party Screen") + } + if err != nil { + log.Error("Masking failed For "+pathToUnzippedFiles+" For Screen "+screen, zap.Error(err)) + mu.Lock() + err := ffmpeg.GenerateThirdPartyImage(screenshot, screen, pathToUnzippedFiles, height, 
width, imageType) + if err != nil { + log.Error("Creating Third Party Image Failed failed For "+pathToUnzippedFiles+" For Screen "+screen, zap.Error(err)) + errorsList = append(errorsList, err) + } + mu.Unlock() + return + } else { + return + } + } + + }(screenshot) + } + + wg.Wait() + close(semaphore) + + if len(errorsList) > 0 { + errMsg := "Errors occurred during processing:\n" + for _, err := range errorsList { + errMsg += fmt.Sprintf("- %v\n", err) + } + return utils.EMPTY, errors.New(errMsg) + } + + return pathToUnzippedFiles, nil +} diff --git a/alfred/cmd/core/app/masking/dsmask_mode_strategy.go b/alfred/cmd/core/app/masking/dsmask_mode_strategy.go new file mode 100644 index 0000000..a6abaab --- /dev/null +++ b/alfred/cmd/core/app/masking/dsmask_mode_strategy.go @@ -0,0 +1,65 @@ +package masking + +import ( + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/utils" + "go.uber.org/zap" + "path/filepath" +) + +type DsMode struct { + s3Client s3.S3Client + clientName string + dataScienceService *external.DataScienceService + isAsyncDSMaskingEnabled bool +} + +func (d *DsMode) Mask(screenshotPathList, screenshotNameList []string, pathToUnzippedFiles, screen, zipName, imageType string) (bool, error) { + maskedFileName := zipName + utils.HYPHEN + utils.MASKED_FILE_SUFFIX + sessionUploadBucket := config.GetCoreConfig().S3Config.SessionUploadBucketClientMap[d.clientName] + + isPresent, err := d.s3Client.CheckIfPresent(sessionUploadBucket, maskedFileName+utils.ZipExtension.String()) + if isPresent { + return d.dataScienceService.ReplaceOriginalImagesWithDsMaskedImages(sessionUploadBucket, maskedFileName, pathToUnzippedFiles) + } + + toBeMaskedFileName, err := createZipFileFromScreenshots(screenshotNameList, pathToUnzippedFiles, zipName, imageType) + defer func() { + utils.DeleteFileFromLocal(toBeMaskedFileName, &[]string{}) + utils.DeleteFileFromLocal(maskedFileName, &[]string{}) + }() + + 
if err != nil { + log.Error("Error occurred while creating the zip file in ds mask stragtegy", zap.String("pathToUnzippedFiles", pathToUnzippedFiles), zap.String("zipName", zipName), zap.Error(err)) + return false, err + } + + _, err = d.s3Client.UploadFile(sessionUploadBucket, utils.TempDestinationFolder, toBeMaskedFileName+utils.ZipExtension.String(), toBeMaskedFileName+utils.ZipExtension.String()) + if err != nil { + log.Error("Error occurred while uploading the zip file in s3 in ds mask stragtegy", zap.String("ZipName", zipName), zap.String("pathToUnzippedFiles", pathToUnzippedFiles), zap.Error(err)) + return false, err + } + log.Info("Uploaded the zip file in s3 in ds mask stragtegy", zap.String("toBeMaskedFileName", toBeMaskedFileName)) + + _, err = d.dataScienceService.MaskImages(screen, sessionUploadBucket, toBeMaskedFileName, maskedFileName) + if err != nil { + log.Error("Error occurred while masking the images in ds mask stragtegy", zap.String("ZipName", zipName), zap.Error(err)) + return false, err + } + + if !d.isAsyncDSMaskingEnabled { + return d.dataScienceService.ReplaceOriginalImagesWithDsMaskedImages(sessionUploadBucket, maskedFileName, pathToUnzippedFiles) + } + return true, nil +} + +func createZipFileFromScreenshots(screenshotNames []string, pathToUnzippedFiles, zipName, imageType string) (string, error) { + toBeMaskedFileName := zipName + utils.HYPHEN + utils.TO_BE_MASKED_FILE_SUFFIX + maskedZipFilePath := filepath.Join(utils.TempDestinationFolder, toBeMaskedFileName+utils.ZipExtension.String()) + err := helper.CreateZipFile(maskedZipFilePath, pathToUnzippedFiles, imageType, screenshotNames) + return toBeMaskedFileName, err +} diff --git a/alfred/cmd/core/app/masking/masking_strategy.go b/alfred/cmd/core/app/masking/masking_strategy.go new file mode 100644 index 0000000..710138b --- /dev/null +++ b/alfred/cmd/core/app/masking/masking_strategy.go @@ -0,0 +1,30 @@ +package masking + +import ( + "alfred/cmd/core/app/external" + "alfred/pkg/s3" + 
"alfred/utils" +) + +type MaskingStrategy interface { + Mask(screenshotPathList, screenshotNameList []string, pathToUnzippedFiles, screen, zipName, imageType string) (bool, error) +} + +type Masking struct { + DataScienceService *external.DataScienceService + // add dependencies here for other strategies if required + strategy MaskingStrategy +} + +func (m *Masking) SetStrategy(screen string, s3Client s3.S3Client, clientName, maskingMode string, isAsyncDSMaskingEnabled bool) { + + if maskingMode == utils.BLUR_MODE { + m.strategy = &BlurMode{} + } else if maskingMode == utils.DS_MODE { + m.strategy = &DsMode{s3Client: s3Client, clientName: clientName, dataScienceService: m.DataScienceService, isAsyncDSMaskingEnabled: isAsyncDSMaskingEnabled} + } +} + +func (m *Masking) ExecuteMaskingscreenshots(screenshotPathList, screenshotNameList []string, pathToUnzippedFiles, screen, zipName, imageType string) (bool, error) { + return m.strategy.Mask(screenshotPathList, screenshotNameList, pathToUnzippedFiles, screen, zipName, imageType) +} diff --git a/alfred/cmd/core/app/server.go b/alfred/cmd/core/app/server.go new file mode 100644 index 0000000..468a19d --- /dev/null +++ b/alfred/cmd/core/app/server.go @@ -0,0 +1,237 @@ +package app + +import ( + "alfred/cmd/core/app/handler" + "alfred/config" + "alfred/internal/clients" + "alfred/internal/metrics" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/scheduler" + "alfred/utils" + "fmt" + "github.com/gin-gonic/gin" + "go.elastic.co/apm/module/apmgin/v2" + "go.uber.org/zap" + "net/http" + "strconv" + "time" +) + +type Server struct { + gin *gin.Engine + repositories *repositoryAccessLayer.RepositoryAccessLayer + s3Client s3.S3Client + httpClient *clients.HttpClient + mjolnirClient *clients.MjolnirClient + alfredIngestorClient *clients.AlfredIngestorClient + metricsMiddleware *gin.RouterGroup +} + +func NewServer(gin *gin.Engine, repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client 
s3.S3Client, httpClient *clients.HttpClient, mjolnirClient *clients.MjolnirClient, alfredIngestorClient *clients.AlfredIngestorClient) *Server { + return &Server{ + gin: gin, + repositories: repositories, + s3Client: s3Client, + httpClient: httpClient, + mjolnirClient: mjolnirClient, + alfredIngestorClient: alfredIngestorClient, + } +} + +func (s *Server) Handler() { + s.metricsMiddleware = s.createMetricMiddleware() + s.gin.Use(apmgin.Middleware(s.gin)) + s.healthCheckHandler() + s.webSessionHandler() + s.gin.Use(s.createMiddleware()) + s.mediaHandler() + s.cruiseControlHandler() + s.filterConfigHandler() + s.scheduleCron() + metrics.AdminHandler(config.GetCoreConfig().BaseConfig.MetricPort) +} + +func (s *Server) createMiddleware() gin.HandlerFunc { + + return func(c *gin.Context) { + whitelistedDomains := getWhitelistedDomains() + + //cors handling + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT") + c.Writer.Header().Set("Access-Control-Allow-Headers", config.GetCoreConfig().CorsConfig.AllowedCustomHeaders) + origin := c.Request.Header.Get("Origin") + if !whitelistedDomains[origin] { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + c.Writer.Header().Set("Access-Control-Allow-Origin", origin) + + if c.Request.Method == "OPTIONS" { + c.AbortWithStatus(http.StatusOK) + return + } + + //auth handling + bypassedSourceSet := getBypassSources() + isAuthEnabled := config.GetCoreConfig().AuthEnabled + source := c.GetHeader("Source") + + if isAuthEnabled && !bypassedSourceSet[source] { + sessionResponse, err := s.mjolnirClient.GetSessionResponse(c.Request.Header.Get(utils.X_SESSION_TOKEN)) + if err != nil || sessionResponse.StatusCode == 401 { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + } + + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + + //metrics publishing + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(c.Request.URL.Path, 
c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + metrics.AlfredApiRequestLatencySummary.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + } +} + +func (s *Server) customUrlMetricMiddleware(requestUrl string) *gin.RouterGroup { + return s.gin.Group("", func(c *gin.Context) { + whitelistedDomains := getWhitelistedDomains() + + //cors handling + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT") + c.Writer.Header().Set("Access-Control-Allow-Headers", config.GetCoreConfig().CorsConfig.AllowedCustomHeaders) + origin := c.Request.Header.Get("Origin") + if !whitelistedDomains[origin] { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + c.Writer.Header().Set("Access-Control-Allow-Origin", origin) + + if c.Request.Method == "OPTIONS" { + c.AbortWithStatus(http.StatusOK) + return + } + + //auth handling + bypassedSourceSet := getBypassSources() + isAuthEnabled := config.GetCoreConfig().AuthEnabled + source := c.GetHeader("Source") + + if isAuthEnabled && !bypassedSourceSet[source] { + sessionResponse, err := s.mjolnirClient.GetSessionResponse(c.Request.Header.Get(utils.X_SESSION_TOKEN)) + if err != nil || sessionResponse.StatusCode == 401 { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + } + + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(requestUrl, c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(requestUrl, c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(requestUrl, c.Request.Method, status).Observe(endTime) + 
metrics.AlfredApiRequestLatencySummary.WithLabelValues(requestUrl, c.Request.Method, status).Observe(endTime) + }) + +} + +func (s *Server) createMetricMiddleware() *gin.RouterGroup { + return s.gin.Group("", func(c *gin.Context) { + authKeyServiceMap := config.GetCoreConfig().AuthKeyServiceMap + authKey := c.Request.Header.Get(utils.X_AUTH_KEY) + _, found := authKeyServiceMap[authKey] + if !found { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + metrics.AlfredApiRequestLatencySummary.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + }) +} + +func getBypassSources() map[string]bool { + bypassList := config.GetCoreConfig().ByPassAuthSources + bypassMap := make(map[string]bool) + for _, source := range bypassList { + sourceLocal := source + bypassMap[sourceLocal] = true + } + return bypassMap +} + +func getWhitelistedDomains() map[string]bool { + allowedList := make(map[string]bool) + domains := config.GetCoreConfig().CorsConfig.WhitelistedDomains + for _, domain := range domains { + domainLocal := domain + allowedList[domainLocal] = true + } + return allowedList +} + +func (s *Server) cruiseControlHandler() { + ccc := handler.NewCruiseControlHandler(s.repositories, s.alfredIngestorClient) + s.gin.GET("/cruise", ccc.FetchCruiseControlConfig) + s.gin.POST("/cruise/add_config", ccc.CreateCruiseControlConfig) + s.gin.GET("/cruise/app_versions", ccc.FetchAllCruiseControlConfigAppVersions) + s.gin.GET("/cruise/dropdowns", ccc.FetchDropdowns) +} + +func (s *Server) Start() { + 
port := strconv.Itoa(config.GetCoreConfig().BaseConfig.Port) + log.Info("starting alfred core server", zap.String("port", port)) + s.gin.Run(fmt.Sprintf(":%s", port)) +} + +func (s *Server) healthCheckHandler() { + s.gin.GET("/ping", func(c *gin.Context) { + c.String(http.StatusOK, "pong") + }) +} + +func (s *Server) mediaHandler() { + m := handler.NewMediaHandler(s.repositories, s.s3Client, s.httpClient) + sh := handler.NewAppSessionHandler(s.repositories, s.s3Client, s.httpClient) + wh := handler.NewWebSessionHandler(s.repositories, s.s3Client, s.httpClient) + eh := handler.NewErrorEventsHandler(s.repositories) + s.gin.GET("/request-video", m.RequestVideo) + s.gin.GET("/fetch-latest-video", m.FetchLatestVideo) + s.gin.GET("/historic-videos", m.FetchHistoricVideo) + s.gin.GET("/sessions", sh.FetchAppSessions) + s.gin.GET("/session/details", sh.FetchAppSessionDetails) + s.gin.GET("/events", sh.FetchEvents) + s.gin.GET("/web/sessions", wh.FetchAllWebSessions) + s.gin.GET("/session-errors", eh.FetchErrorEvents) + s.metricsMiddleware.GET("/internal/app/session/details", sh.FetchAppSessionDetails) +} + +func (s *Server) webSessionHandler() { + wh := handler.NewWebSessionHandler(s.repositories, s.s3Client, s.httpClient) + s.customUrlMetricMiddleware("/web/sessions/id").GET("/web/sessions/:id", wh.FetchWebSessions) +} + +func (s *Server) filterConfigHandler() { + m := handler.NewFilterConfigHandler(s.repositories) + s.gin.GET("/filters", m.FetchFilterConfig) + s.gin.GET("/web/filters", m.FetchWebFilters) +} + +func (s *Server) scheduleCron() { + //scheduler.ScheduleCronForDeviceMetricsToSlack(s.repositories, s.httpClient) + scheduler.ScheduleCronForErrorEventsUpdate(s.repositories) +} diff --git a/alfred/cmd/core/app/service/app_session_cosmos.go b/alfred/cmd/core/app/service/app_session_cosmos.go new file mode 100644 index 0000000..9cee618 --- /dev/null +++ b/alfred/cmd/core/app/service/app_session_cosmos.go @@ -0,0 +1,178 @@ +package service + +import ( + 
"alfred/api/request" + "alfred/api/response" + "alfred/cmd/core/app/helper" + "alfred/model/common" + "alfred/model/es" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "github.com/u2takey/go-utils/slice" + "go.uber.org/zap" + "strings" +) + +type AppSessionCosmos struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer +} + +func NewAppSessionCosmos(repositories *repositoryAccessLayer.RepositoryAccessLayer) *AppSessionCosmos { + return &AppSessionCosmos{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + eventsAccessLayer: repositories.EventsAccessLayer, + } +} + +func (c *AppSessionCosmos) FetchSessionDetails(filters request.SessionFilters, page *es.Page, sessionUploadIndex string, eventIngestionIndex string) ([]es.SessionResponse, error) { + deviceId := filters.DeviceId + customerId := filters.CustomerId + phoneNumber := filters.PhoneNumber + sessionId := filters.SessionId + startTimestamp := filters.StartTimestamp + endTimestamp := filters.EndTimestamp + labels := filters.Labels + appName := filters.AppName + screenName := filters.ScreenName + vertical := filters.Vertical + appVersion := filters.AppVersion + screenTags := filters.ScreenTags + codePushVersion := filters.CodePushVersion + agentEmailId := filters.AgentEmailId + sortBy := filters.SortBy + snapshotPerSecond := filters.SnapshotPerSecond + appOs := filters.AppOs + + if len(sessionId) != 0 { + return c.sessionsAccessLayer.FetchSessionWithSessionDuration([]string{sessionId}, sessionUploadIndex, page, sortBy) + } + + //Todo check for client here and avoid duplication here , we can use media handler + var deviceIds []string + + if len(deviceId) != 0 { + deviceIds = []string{deviceId} + } + + if utils.CheckFilterEnabled(labels, customerId, phoneNumber, appName, screenName, vertical, appVersion, deviceIds, screenTags, codePushVersion, agentEmailId, utils.EMPTY, snapshotPerSecond) { + 
labelsList, appNameList, screenNameList, verticalList, appVersionList, phoneNumberList, customerIdList, codePushVersionList, agentEmailIdList, snapshotPerSecondList, err := c.getFilterValues(labels, appName, screenName, vertical, appVersion, screenTags, phoneNumber, customerId, codePushVersion, agentEmailId, snapshotPerSecond) + + if err != nil { + return nil, err + } + snapshotPerSecondIntList, _ := utils.ConvertStringListToInt64List(snapshotPerSecondList) + + return c.FetchSessionDetailsFromLabels(labelsList, appNameList, screenNameList, verticalList, appVersionList, deviceIds, + startTimestamp, endTimestamp, page, sessionUploadIndex, eventIngestionIndex, phoneNumberList, customerIdList, + codePushVersionList, agentEmailIdList, sortBy, snapshotPerSecondIntList, appOs) + + } + + sessionUploadSearchIndex := helper.CreateSearchIndex(sessionUploadIndex, startTimestamp, endTimestamp) + return c.sessionsAccessLayer.FetchSessionWithTimeRange(startTimestamp, endTimestamp, page, sessionUploadSearchIndex, sessionUploadIndex, sortBy, appOs) +} + +func (c *AppSessionCosmos) CreateBucketsForSessionForResponse(sessions []es.SessionResponse) (sessionResponseDataList []response.SearchSessionResponseData, err error) { + sessionBuckets := make(map[string]*response.SearchSessionResponseData) + for _, session := range sessions { + sessionId := session.Source.BaseAttributes.SessionId + existingSessionResponseData, ok := sessionBuckets[sessionId] + if !ok { + sessionResponse := helper.CreateSessionResponse(session, session.Source.CustomerId) + sessionBuckets[sessionId] = &sessionResponse + } else { + existingSessionResponseData.BaseAttributesDTO.EndTimestamp = session.Source.BaseAttributes.EventTimestamp + existingSessionResponseData.DeviceAttributes = append(existingSessionResponseData.DeviceAttributes, + session.Source.SessionUploadEventAttributes.BeginningDeviceAttributes, + session.Source.SessionUploadEventAttributes.EndDeviceAttributes, + ) + if 
sessionBuckets[sessionId].BaseAttributesDTO.Metadata == nil && session.Source.BaseAttributes.Metadata != nil { + sessionResponse := helper.CreateSessionResponse(session, session.Source.CustomerId) + sessionBuckets[sessionId] = &sessionResponse + } + } + } + for _, sessionResponseData := range sessionBuckets { + sessionResponseDataLocal := sessionResponseData + sessionResponseDataList = append(sessionResponseDataList, *sessionResponseDataLocal) + } + return sessionResponseDataList, nil +} + +func (c *AppSessionCosmos) FetchSessionDetailsFromLabels(labels, appName, screenName, vertical, appVersion []string, + deviceIds []string, startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndex string, eventIngestionIndex string, + phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string, sortBy string, + snapshotPerSecond []int64, appOs string) ([]es.SessionResponse, error) { + eventIngestionSearchIndex := helper.CreateSearchIndex(eventIngestionIndex, startTimestamp, endTimestamp) + eventResponse, err := c.eventsAccessLayer.FetchEventsWithLabels(labels, appName, screenName, nil, + vertical, appVersion, deviceIds, snapshotPerSecond, startTimestamp, endTimestamp, page, eventIngestionSearchIndex, + phoneNumber, customerId, codePushVersion, agentEmailId, appOs) + if err != nil { + log.Error("could not extract events for labels provided", zap.Error(err)) + return nil, err + } + uniqueSessions, idToEventMap := helper.GetUniqueSessionsFromEventResponse(eventResponse) + sessionResponse, err := c.sessionsAccessLayer.FetchSessionAndSessionDurationWithSessionIds(uniqueSessions, sessionUploadIndex, page, sortBy) + if err != nil && !errors.Is(err, &common.InvalidSessionError{}) { + log.Error("could not extract sessions for given event sessions", zap.Error(err)) + return nil, err + } + + uniqueSessionsFromSessionUpload := helper.GetUniqueSessionsFromSessionUpload(sessionResponse) + for _, sessions := range uniqueSessions { + sessionsLocal := 
sessions + if !slice.ContainsString(uniqueSessionsFromSessionUpload, sessionsLocal, nil) { + sessionResponse = append(sessionResponse, helper.CreateSessionResponseFromEventResponse(idToEventMap[sessions])) + } + } + return sessionResponse, nil +} + +func (c *AppSessionCosmos) getFilterValues(labels string, appName string, screenName string, vertical string, + appVersion string, screenTags string, phoneNumber string, customerId string, codePushVersion string, + agentEmailId string, snapshotPerSecond string) (labelsList, appNameList, screenNameList, verticalList, + appVersionList []string, phoneNumberList []string, customerIdList []string, codePushVersionList []string, + agentEmailIdList []string, snapshotPerSecondList []string, err error) { + if labels != utils.EMPTY { + labelsList = strings.Split(labels, utils.COMMA) + } + if appName != utils.EMPTY { + appNameList = strings.Split(appName, utils.COMMA) + } + if screenName != utils.EMPTY { + screenNameList = strings.Split(screenName, utils.COMMA) + } + if vertical != utils.EMPTY { + verticalList = strings.Split(vertical, utils.COMMA) + } + if appVersion != utils.EMPTY { + appVersionList = strings.Split(appVersion, utils.COMMA) + } + if phoneNumber != utils.EMPTY { + phoneNumberList = []string{phoneNumber} + } + if customerId != utils.EMPTY { + customerIdList = []string{customerId} + } + if agentEmailId != utils.EMPTY { + agentEmailIdList = []string{agentEmailId} + } + if codePushVersion != utils.EMPTY { + codePushVersionList = strings.Split(codePushVersion, utils.COMMA) + } + if snapshotPerSecond != utils.EMPTY { + snapshotPerSecondList = strings.Split(snapshotPerSecond, utils.COMMA) + } + if screenTags != utils.EMPTY { + screenTagsList := strings.Split(screenTags, utils.COMMA) + screens, err := getScreensByScreenTags(screenTagsList) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err + } + screenNameList = append(screenNameList, screens...) 
+ } + return labelsList, appNameList, screenNameList, verticalList, appVersionList, phoneNumberList, customerIdList, codePushVersionList, agentEmailIdList, snapshotPerSecondList, nil +} diff --git a/alfred/cmd/core/app/service/app_session_navi_app.go b/alfred/cmd/core/app/service/app_session_navi_app.go new file mode 100644 index 0000000..0bd67ad --- /dev/null +++ b/alfred/cmd/core/app/service/app_session_navi_app.go @@ -0,0 +1,185 @@ +package service + +import ( + "alfred/api/request" + "alfred/api/response" + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/internal/clients" + "alfred/model/common" + "alfred/model/es" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "github.com/u2takey/go-utils/slice" + "go.uber.org/zap" + "strings" +) + +type AppSessionNaviApp struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + customerService *external.CustomerService +} + +func NewAppSessionNaviApp(repositories *repositoryAccessLayer.RepositoryAccessLayer, + httpClient *clients.HttpClient) *AppSessionNaviApp { + return &AppSessionNaviApp{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + eventsAccessLayer: repositories.EventsAccessLayer, + customerService: external.NewCustomerService(httpClient), + } +} + +func (s *AppSessionNaviApp) FetchSessionDetails(filters request.SessionFilters, page *es.Page, + sessionUploadIndex string, eventIngestionIndex string) ([]es.SessionResponse, error) { + deviceId := filters.DeviceId + customerId := filters.CustomerId + phoneNumber := filters.PhoneNumber + sessionId := filters.SessionId + startTimestamp := filters.StartTimestamp + endTimestamp := filters.EndTimestamp + labels := filters.Labels + appName := filters.AppName + screenName := filters.ScreenName + fragmentNames := filters.FragmentNames + vertical := filters.Vertical + appVersion := filters.AppVersion + screenTags := 
filters.ScreenTags + snapshotPerSecond := filters.SnapshotPerSecond + sortBy := filters.SortBy + appOs := filters.AppOs + + if len(sessionId) != 0 { + return s.sessionsAccessLayer.FetchSessionWithSessionDuration([]string{sessionId}, sessionUploadIndex, page, sortBy) + } + + //Todo check for client here and avoid duplication here , we can use media handler + var deviceIds []string + + if deviceId != utils.EMPTY { + deviceIds = []string{deviceId} + } + deviceIdsLocal, err := s.customerService.GetDeviceIds(phoneNumber, customerId, deviceIds) + if err != nil { + return nil, err + } + deviceIds = deviceIdsLocal + + if utils.CheckFilterEnabled(labels, customerId, phoneNumber, appName, screenName, vertical, appVersion, deviceIds, screenTags, utils.EMPTY, utils.EMPTY, fragmentNames, snapshotPerSecond) { + labelsList, appNameList, screenNameList, verticalList, appVersionList, fragmentNameList, snapshotPerSecondList, err := s.getFilterValues(labels, appName, screenName, vertical, appVersion, screenTags, fragmentNames, snapshotPerSecond) + + if err != nil { + return nil, err + } + snapshotPerSecondIntList, _ := utils.ConvertStringListToInt64List(snapshotPerSecondList) + + return s.FetchSessionDetailsFromLabels(labelsList, appNameList, screenNameList, fragmentNameList, verticalList, appVersionList, deviceIds, snapshotPerSecondIntList, + startTimestamp, endTimestamp, page, sessionUploadIndex, eventIngestionIndex, sortBy, appOs) + } + + sessionUploadSearchIndex := helper.CreateSearchIndex(sessionUploadIndex, startTimestamp, endTimestamp) + return s.sessionsAccessLayer.FetchSessionWithTimeRange(startTimestamp, endTimestamp, page, sessionUploadSearchIndex, sessionUploadIndex, sortBy, appOs) +} + +func (s *AppSessionNaviApp) CreateBucketsForSessionForResponse(sessions []es.SessionResponse) (sessionResponseDataList []response.SearchSessionResponseData, err error) { + sessionBuckets := make(map[string]*response.SearchSessionResponseData) + for _, session := range sessions { + 
sessionId := session.Source.BaseAttributes.SessionId + existingSessionResponseData, ok := sessionBuckets[sessionId] + if !ok { + sessionResponse := helper.CreateSessionResponse(session, s.customerService.GetCustomerRefId(session.Source.CustomerId)) + sessionBuckets[sessionId] = &sessionResponse + } else { + if session.Source.BaseAttributes.HasErrors { + existingSessionResponseData.BaseAttributesDTO.HasErrors = true + } + existingSessionResponseData.BaseAttributesDTO.EndTimestamp = session.Source.BaseAttributes.EventTimestamp + existingSessionResponseData.DeviceAttributes = append(existingSessionResponseData.DeviceAttributes, + session.Source.SessionUploadEventAttributes.BeginningDeviceAttributes, + session.Source.SessionUploadEventAttributes.EndDeviceAttributes, + ) + } + } + for _, sessionResponseData := range sessionBuckets { + sessionResponseDataLocal := sessionResponseData + sessionResponseDataList = append(sessionResponseDataList, *sessionResponseDataLocal) + } + return sessionResponseDataList, nil +} + +func (s *AppSessionNaviApp) FetchSessionDetailsFromLabels(labels, appName, screenName, fragmentName, vertical, + appVersion []string, deviceIds []string, snapshotPerSecond []int64, startTimestamp, endTimestamp int64, page *es.Page, + sessionUploadIndex string, eventIngestionIndex, sortBy, appOs string) ([]es.SessionResponse, error) { + + eventIngestionSearchIndex := helper.CreateSearchIndex(eventIngestionIndex, startTimestamp, endTimestamp) + eventResponse, err := s.eventsAccessLayer.FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion, + deviceIds, snapshotPerSecond, startTimestamp, endTimestamp, page, eventIngestionSearchIndex, nil, + nil, nil, nil, appOs) + + if err != nil { + log.Error("could not extract events for labels provided", zap.Error(err)) + return nil, err + } + uniqueSessions, idToEventMap := helper.GetUniqueSessionsFromEventResponse(eventResponse) + sessionResponse, err := 
s.sessionsAccessLayer.FetchSessionAndSessionDurationWithSessionIds(uniqueSessions, sessionUploadIndex, page, sortBy) + if err != nil && !errors.Is(err, &common.InvalidSessionError{}) { + log.Error("could not extract sessions for given event sessions", zap.Error(err)) + return nil, err + } + + uniqueSessionsFromSessionUpload := helper.GetUniqueSessionsFromSessionUpload(sessionResponse) + for _, sessions := range uniqueSessions { + sessionsLocal := sessions + if !slice.ContainsString(uniqueSessionsFromSessionUpload, sessionsLocal, nil) { + sessionResponse = append(sessionResponse, helper.CreateSessionResponseFromEventResponse(idToEventMap[sessions])) + } + } + return sessionResponse, nil +} + +func (s *AppSessionNaviApp) getFilterValues(labels, appName, screenName, vertical, appVersion, screenTags, fragmentName, snapshotPerSecond string) (labelsList, appNameList, screenNameList, verticalList, appVersionList, fragmentNameList, snapshotPerSecondList []string, err error) { + if labels != utils.EMPTY { + labelsList = strings.Split(labels, utils.COMMA) + } + if appName != utils.EMPTY { + appNameList = strings.Split(appName, utils.COMMA) + } + if screenName != utils.EMPTY { + screenNameList = strings.Split(screenName, utils.COMMA) + } + if fragmentName != utils.EMPTY { + fragmentNameList = strings.Split(fragmentName, utils.COMMA) + } + if snapshotPerSecond != utils.EMPTY { + snapshotPerSecondList = strings.Split(snapshotPerSecond, utils.COMMA) + } + if vertical != utils.EMPTY { + verticalList = strings.Split(vertical, utils.COMMA) + } + if appVersion != utils.EMPTY { + appVersionList = strings.Split(appVersion, utils.COMMA) + } + if screenTags != utils.EMPTY { + screenTagsList := strings.Split(screenTags, utils.COMMA) + screens, err := getScreensByScreenTags(screenTagsList) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, err + } + screenNameList = append(screenNameList, screens...) 
+ } + return labelsList, appNameList, screenNameList, verticalList, appVersionList, fragmentNameList, snapshotPerSecondList, nil +} + +func getScreensByScreenTags(screenTags []string) ([]string, error) { + configMap := config.GetCoreConfig().ScreenTagFilters + var screens []string + for _, tag := range screenTags { + localTag := tag + screens = append(screens, configMap[localTag]...) + } + + return screens, nil +} diff --git a/alfred/cmd/core/app/service/app_sesssion_navi_app_apple_os.go b/alfred/cmd/core/app/service/app_sesssion_navi_app_apple_os.go new file mode 100644 index 0000000..1cfedd3 --- /dev/null +++ b/alfred/cmd/core/app/service/app_sesssion_navi_app_apple_os.go @@ -0,0 +1,131 @@ +package service + +import ( + "alfred/api/request" + "alfred/api/response" + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/internal/clients" + "alfred/model/common" + "alfred/model/es" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "go.uber.org/zap" + "strings" +) + +type AppSessionNaviAppIos struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + customerService *external.CustomerService +} + +func NewAppSessionNaviAppIos(repositories *repositoryAccessLayer.RepositoryAccessLayer, + httpClient *clients.HttpClient) *AppSessionNaviAppIos { + return &AppSessionNaviAppIos{ + sessionsAccessLayer: repositories.SessionsAccessLayer, + customerService: external.NewCustomerService(httpClient), + } +} + +func (s *AppSessionNaviAppIos) FetchSessionDetails(filters request.SessionFilters, page *es.Page, + sessionUploadIndex string, eventIngestionIndex string) ([]es.SessionResponse, error) { + deviceId := filters.DeviceId + customerId := filters.CustomerId + phoneNumber := filters.PhoneNumber + sessionId := filters.SessionId + startTimestamp := filters.StartTimestamp + endTimestamp := filters.EndTimestamp + labels := filters.Labels + appName := filters.AppName + screenName := filters.ScreenName + fragmentNames := 
filters.FragmentNames + vertical := filters.Vertical + appVersion := filters.AppVersion + screenTags := filters.ScreenTags + snapshotPerSecond := filters.SnapshotPerSecond + sortBy := filters.SortBy + appOs := filters.AppOs + if len(sessionId) != 0 { + return s.sessionsAccessLayer.FetchSessionWithSessionDuration([]string{sessionId}, sessionUploadIndex, page, sortBy) + } + + var deviceIds []string + + if len(deviceId) != 0 { + deviceIds = []string{deviceId} + } + deviceIdsLocal, err := s.customerService.GetDeviceIds(phoneNumber, customerId, deviceIds) + if err != nil { + return nil, err + } + deviceIds = deviceIdsLocal + + if utils.CheckFilterEnabled(labels, customerId, phoneNumber, appName, screenName, vertical, appVersion, deviceIds, screenTags, utils.EMPTY, utils.EMPTY, fragmentNames, snapshotPerSecond) { + appVersionList, err := s.getFilterValues(appVersion) + + if err != nil { + log.Error("error while extracting filters", zap.Error(err)) + return nil, err + } + + return s.FetchSessionDetailsFromLabels(appVersionList, deviceIds, startTimestamp, endTimestamp, page, sessionUploadIndex, sortBy, appOs) + } + + sessionUploadSearchIndex := helper.CreateSearchIndex(sessionUploadIndex, startTimestamp, endTimestamp) + return s.sessionsAccessLayer.FetchSessionWithTimeRange(startTimestamp, endTimestamp, page, sessionUploadSearchIndex, sessionUploadIndex, sortBy, appOs) +} + +func (s *AppSessionNaviAppIos) FetchSessionDetailsFromLabels(appVersion []string, deviceIds []string, startTimestamp, + endTimestamp int64, page *es.Page, sessionUploadIndex string, sortBy, appOs string) ([]es.SessionResponse, error) { + + sessionUploadSearchIndex := helper.CreateSearchIndex(sessionUploadIndex, startTimestamp, endTimestamp) + sessionResponseWithLabels, err := s.sessionsAccessLayer.FetchSessionWithLabels(appVersion, deviceIds, startTimestamp, endTimestamp, page, sessionUploadSearchIndex, appOs) + if err != nil { + log.Error("could not extract sessions for labels provided", 
zap.Error(err)) + return nil, err + } + uniqueSessions, _ := helper.GetUniqueSessionsFromSessionResponse(sessionResponseWithLabels) + sessionResponse, err := s.sessionsAccessLayer.FetchSessionAndSessionDurationWithSessionIds(uniqueSessions, sessionUploadIndex, page, sortBy) + if err != nil && !errors.Is(err, &common.InvalidSessionError{}) { + log.Error("could not extract sessions for extracted sessions", zap.Error(err)) + return nil, err + } + + return sessionResponse, nil +} + +func (s *AppSessionNaviAppIos) CreateBucketsForSessionForResponse(sessions []es.SessionResponse) (sessionResponseDataList []response.SearchSessionResponseData, err error) { + sessionBuckets := make(map[string]*response.SearchSessionResponseData) + for _, session := range sessions { + sessionId := session.Source.BaseAttributes.SessionId + existingSessionResponseData, ok := sessionBuckets[sessionId] + if !ok { + sessionResponse := helper.CreateSessionResponse(session, s.customerService.GetCustomerRefId(session.Source.CustomerId)) + sessionBuckets[sessionId] = &sessionResponse + } else { + if session.Source.BaseAttributes.HasErrors { + existingSessionResponseData.BaseAttributesDTO.HasErrors = true + } + existingSessionResponseData.BaseAttributesDTO.EndTimestamp = session.Source.BaseAttributes.EventTimestamp + existingSessionResponseData.DeviceAttributes = append(existingSessionResponseData.DeviceAttributes, + session.Source.SessionUploadEventAttributes.BeginningDeviceAttributes, + session.Source.SessionUploadEventAttributes.EndDeviceAttributes, + ) + } + } + for _, sessionResponseData := range sessionBuckets { + sessionResponseDataLocal := sessionResponseData + sessionResponseDataList = append(sessionResponseDataList, *sessionResponseDataLocal) + } + return sessionResponseDataList, nil +} + +func (s *AppSessionNaviAppIos) getFilterValues(appVersion string) (appVersionList []string, err error) { + + if appVersion != utils.EMPTY { + appVersionList = strings.Split(appVersion, utils.COMMA) + } + 
return appVersionList, nil +} diff --git a/alfred/cmd/core/app/service/device_metrics_service.go b/alfred/cmd/core/app/service/device_metrics_service.go new file mode 100644 index 0000000..6f801cb --- /dev/null +++ b/alfred/cmd/core/app/service/device_metrics_service.go @@ -0,0 +1,213 @@ +package service + +import ( + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/internal/clients" + "alfred/model/core" + "alfred/model/es" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "fmt" + "go.uber.org/zap" + "strconv" + "sync" +) + +type DeviceMetrics interface { + PublishDeviceMetrics(client string, cronTime int64) + UpdateDeviceMetrics(client string, cronTime int64) + PublishDeviceAttributesToSlack(snapshotPerSecond int64, client string, cronTime int64) + UpdateDeviceAttributes(snapshotPerSecond int64, client string, cronTime int64) +} + +type DeviceMetricsImpl struct { + slackService clients.SlackServiceClient + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + deviceMetricsAccessLayer repositoryAccessLayer.DeviceMetricsAccessLayer +} + +func NewDeviceMetrics(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) *DeviceMetricsImpl { + return &DeviceMetricsImpl{ + slackService: &clients.SlackServiceClientImpl{HttpClient: httpClient.HttpClient}, + sessionsAccessLayer: repositories.SessionsAccessLayer, + deviceMetricsAccessLayer: repositories.DeviceMetricsAccessLayer, + } +} + +func (dm *DeviceMetricsImpl) PublishDeviceMetrics(client string, cronTime int64) { + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + dm.PublishDeviceAttributesToSlack(utils.RECORDING_2_FPS, client, cronTime) + }() + wg.Add(1) + go func() { + defer wg.Done() + dm.PublishDeviceAttributesToSlack(utils.DEFAULT_RECORDING_FPS, client, cronTime) + }() + wg.Wait() + +} + +func (dm *DeviceMetricsImpl) UpdateDeviceMetrics(client string, cronTime int64) { + var wg sync.WaitGroup + wg.Add(1) + go func() { + 
defer wg.Done() + dm.UpdateDeviceAttributes(utils.RECORDING_2_FPS, client, cronTime) + }() + wg.Add(1) + go func() { + defer wg.Done() + dm.UpdateDeviceAttributes(utils.DEFAULT_RECORDING_FPS, client, cronTime) + }() + wg.Wait() + +} + +func (dm *DeviceMetricsImpl) PublishDeviceAttributesToSlack(snapshotPerSecond int64, client string, cronTime int64) { + endTimeStamp := utils.GetCurrentTimeInMillis() + startTimeStamp := endTimeStamp - cronTime + deviceIndex := config.GetCoreConfig().ElasticSearchConfig.DeviceMetricsIndexClientMap[client] + deviceSearchIndexList := helper.CreateSearchIndex(deviceIndex, startTimeStamp, endTimeStamp) + + response, err := dm.deviceMetricsAccessLayer.GetDeviceMetrics(startTimeStamp, endTimeStamp, snapshotPerSecond, client, deviceSearchIndexList) + if err != nil { + log.Error("Unable to fetch Latest Device Metrics", zap.Error(err)) + return + } + + batteryMap := make(map[string]float64) + memoryMap := make(map[string]float64) + timeMap := make(map[string]float64) + + batteryKeys := make(utils.Set) + memoryKeys := make(utils.Set) + timeKeys := make(utils.Set) + + totalBatteries, totalMemories, totalTimes := 0.0, 0.0, 0.0 + + for _, deviceAttribute := range response { + for key, value := range deviceAttribute.Source.DeviceMetrics.BatteryDropMap { + batteryMap[key] += value + totalBatteries += value + batteryKeys.Add(key) + } + for key, value := range deviceAttribute.Source.DeviceMetrics.MemoryUsageMap { + memoryMap[key] += value + totalMemories += value + memoryKeys.Add(key) + } + for key, value := range deviceAttribute.Source.DeviceMetrics.TimeDurationMap { + timeMap[key] += value + totalTimes += value + timeKeys.Add(key) + } + } + + messageVariables := make(map[string]interface{}) + + messageVariables["battery_decrease_P90"], messageVariables["battery_decrease_P95"], messageVariables["battery_decrease_P50"] = dm.calculatePercentiles(batteryMap, totalBatteries, batteryKeys.Elements()) + messageVariables["memory_increase_P90"], 
messageVariables["memory_increase_P95"], messageVariables["memory_increase_P50"] = dm.calculatePercentiles(memoryMap, totalMemories, memoryKeys.Elements())
+ messageVariables["video_duration_P90"], _, _ = dm.calculatePercentiles(timeMap, totalTimes, timeKeys.Elements())
+ messageVariables["snapshot_per_second"] = snapshotPerSecond
+ messageVariables["channelToken"] = config.GetCoreConfig().DeviceMonitoringConfig.DeviceMonitoringChannelIdMap[client]
+
+ log.Info("Device Metrics To Be Pushed To Slack: ", zap.Any("Message Body:", messageVariables))
+
+ // NOTE(review): the single-value type assertions below panic if the client
+ // key is missing or the config value is not a float64 — confirm the config
+ // schema guarantees these entries.
+ if messageVariables["battery_decrease_P90"].(float64) >= config.GetCoreConfig().DeviceMonitoringConfig.BatteryAlertMap[client].(float64) || messageVariables["memory_increase_P90"].(float64) >= config.GetCoreConfig().DeviceMonitoringConfig.MemoryAlertMap[client].(float64) {
+ dm.slackService.SendMessageForDeviceMetrics(messageVariables)
+ }
+
+}
+
+// UpdateDeviceAttributes pages through sessions of the last cronTime millis for
+// the given client/fps, aggregates per-session battery drop, memory growth and
+// duration (seconds) into "%.1f"-keyed count maps, and persists the result via
+// the device metrics access layer.
+func (dm *DeviceMetricsImpl) UpdateDeviceAttributes(snapshotPerSecond int64, client string, cronTime int64) {
+ endTimeStamp := utils.GetCurrentTimeInMillis()
+ startTimeStamp := endTimeStamp - cronTime
+ sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[client]
+ deviceUploadIndex := config.GetCoreConfig().ElasticSearchConfig.DeviceMetricsIndexClientMap[client]
+
+ currentPage := int64(0)
+ batteryMap := make(map[string]float64)
+ memoryMap := make(map[string]float64)
+ timeMap := make(map[string]float64)
+
+ for {
+ // Hard cap on sessions scanned per run.
+ // NOTE(review): unchecked .(float64) on the limit map — panics if the
+ // client key is absent; confirm config always provides it.
+ if currentPage*utils.SessionUpperLimit >= int64(config.GetCoreConfig().DeviceMonitoringConfig.DeviceMetricsTotalLimitMap[client].(float64)) {
+ break
+ }
+
+ page := &es.Page{
+ PageSize: utils.SessionUpperLimit,
+ PageNumber: currentPage,
+ }
+
+ result, err := dm.sessionsAccessLayer.FetchDeviceAttributesForMetrics(startTimeStamp, endTimeStamp, snapshotPerSecond, sessionUploadIndex, page)
+ if err != nil {
+ log.Error("Fetching device metrics failed", zap.Error(err))
+ // Fix: previously execution fell through and dereferenced `result`
+ // after a failed fetch; stop paging instead and persist what we have.
+ break
+ }
+ if len(result.Aggregations.Filter.Buckets.Buckets) == 0 {
+ break
+ }
+
+ for _, buckets := range result.Aggregations.Filter.Buckets.Buckets {
+ batteryDiff := buckets.MaxBeginningBattery.Value - buckets.MinEndBattery.Value
+ memoryDiff := buckets.MaxEndMemory.Value - buckets.MinEndMemory.Value
+ timeDiff := (buckets.MaxEventEndTimestamp.Value - buckets.MinClientTimestamp.Value) / 1000
+
+ // Only positive deltas are meaningful; the bucket key is the delta
+ // rounded to one decimal, the value is the occurrence count.
+ if batteryDiff > 0 {
+ batteryMap[fmt.Sprintf("%.1f", batteryDiff)] += 1
+ }
+ if memoryDiff > 0 {
+ memoryMap[fmt.Sprintf("%.1f", memoryDiff)] += 1
+ }
+ if timeDiff > 0 {
+ timeMap[fmt.Sprintf("%.1f", timeDiff)] += 1
+ }
+
+ }
+
+ currentPage++
+ }
+
+ deviceMetrics := core.DeviceMetricsModel{
+ DeviceMetrics: core.DeviceMetrics{
+ CreatedAt: utils.GetCurrentTimeInMillis(),
+ SnapshotPerSecond: snapshotPerSecond,
+ ClientName: client,
+ BatteryDropMap: batteryMap,
+ MemoryUsageMap: memoryMap,
+ TimeDurationMap: timeMap,
+ },
+ }
+
+ // NOTE(review): insert errors are deliberately dropped; consider logging.
+ _ = dm.deviceMetricsAccessLayer.InsertDeviceMetrics(&deviceMetrics, deviceUploadIndex)
+
+}
+
+// calculatePercentiles walks the cumulative distribution described by
+// attributeMap (bucket key -> count, keys formatted "%.1f") and returns the
+// bucket values at which the running total first reaches the P90, P95 and P50
+// fractions of totalAttribute.
+//
+// Fixes over the previous version:
+//   - keys come from set/map iteration and are unordered; a cumulative walk is
+//     only meaningful over numerically ascending buckets, so sort them first;
+//   - p50 was evaluated after the p95 break, so it could be skipped (returned
+//     as 0) whenever a single bucket crossed both thresholds.
+func (dm *DeviceMetricsImpl) calculatePercentiles(attributeMap map[string]float64, totalAttribute float64, keys []string) (p90, p95, p50 float64) {
+
+ // Insertion-sort the keys by numeric value (bucket counts are small and
+ // this avoids touching the file's import block). Malformed keys are
+ // skipped rather than silently parsed as 0.
+ sortedKeys := make([]string, 0, len(keys))
+ for _, key := range keys {
+ keyValue, err := strconv.ParseFloat(key, 64)
+ if err != nil {
+ continue
+ }
+ pos := len(sortedKeys)
+ for pos > 0 {
+ prev, _ := strconv.ParseFloat(sortedKeys[pos-1], 64)
+ if prev <= keyValue {
+ break
+ }
+ pos--
+ }
+ sortedKeys = append(sortedKeys, "")
+ copy(sortedKeys[pos+1:], sortedKeys[pos:])
+ sortedKeys[pos] = key
+ }
+
+ currentAttribute := 0.0
+ var p50Set, p90Set bool
+ for _, attributeKey := range sortedKeys {
+ currentAttribute += attributeMap[attributeKey]
+ value, _ := strconv.ParseFloat(attributeKey, 64)
+ // Latch each percentile at the first bucket whose cumulative count
+ // reaches its threshold, checking thresholds in ascending order.
+ if !p50Set && currentAttribute >= utils.P50*totalAttribute {
+ p50, p50Set = value, true
+ }
+ if !p90Set && currentAttribute >= utils.P90*totalAttribute {
+ p90, p90Set = value, true
+ }
+ if currentAttribute >= utils.P95*totalAttribute {
+ p95 = value
+ break
+ }
+ }
+
+ return p90, p95, p50
+}
diff --git a/alfred/cmd/core/app/service/filter_cosmos.go b/alfred/cmd/core/app/service/filter_cosmos.go
new file mode 100644
index 0000000..a26e597
--- /dev/null
+++ b/alfred/cmd/core/app/service/filter_cosmos.go
@@ -0,0 +1,103 @@
+package service
+
+import (
+ "alfred/api/response"
+ "alfred/cmd/core/app/helper"
+
"alfred/pkg/cache" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "go.uber.org/zap" +) + +type FilterCosmos struct { + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + cacheClient cache.ConfigClientInterface +} + +func NewFilterCosmos(repositories *repositoryAccessLayer.RepositoryAccessLayer) *FilterCosmos { + return &FilterCosmos{ + eventsAccessLayer: repositories.EventsAccessLayer, + cacheClient: cache.NewCacheConfig(), + } +} + +func (fn *FilterCosmos) CreateFilterResponse(eventIngestionIndex string) []response.FilterResponseData { + return []response.FilterResponseData{ + { + FilterName: "Labels", + FilterKey: "labels", + FilterData: fn.createLabelTypeData(), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "App Name", + FilterKey: "app_version_name", + FilterData: fn.getFilterValues("base_attributes.app_version_name", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Code Push Name", + FilterKey: "code_push_version", + FilterData: fn.getFilterValues("base_attributes.metadata.code_push_version.keyword", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Screen Name", + FilterKey: "screen_name", + FilterData: fn.getFilterValues("events.screen_name", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "App Version Code", + FilterKey: "app_version_code", + FilterData: fn.getFilterValues("base_attributes.app_version_code", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Select Dates", + FilterKey: "date_range_picker", + SelectionConfig: response.RANGE_PICKER, + }, + } +} + +func (fn *FilterCosmos) getFilterValues(key string, eventIngestionIndex string) []response.FilterData { + values := fn.getUniqueValue(key, eventIngestionIndex) + return helper.MapToFilterData(values) +} + +func (fn *FilterCosmos) getUniqueValue(key string, eventIngestionIndex string) (values 
[]string) { + valueInCache, found := fn.cacheClient.Get(key + utils.COSMOS) + if found { + //cache hit + values = valueInCache.([]string) + } else { + //cache miss + valuesFromDb, err := fn.eventsAccessLayer.FetchUniqueKeys(key, eventIngestionIndex) + if err != nil { + log.Error("error while fetching unique keys for cosmos", zap.Error(err)) + return nil + } + fn.cacheClient.PutWithDefaultTtl(key+utils.COSMOS, valuesFromDb) + values = valuesFromDb + } + return values +} + +func (fn *FilterCosmos) createLabelTypeData() []response.FilterData { + return []response.FilterData{ + { + Label: "ANR", + Value: "ANR_EVENT", + }, + { + Label: "CRASH", + Value: "CRASH_ANALYTICS_EVENT", + }, + { + Label: "SWW", + Value: "ERROR_LOG", + }, + } +} diff --git a/alfred/cmd/core/app/service/filter_navi.go b/alfred/cmd/core/app/service/filter_navi.go new file mode 100644 index 0000000..ba0b831 --- /dev/null +++ b/alfred/cmd/core/app/service/filter_navi.go @@ -0,0 +1,153 @@ +package service + +import ( + "alfred/api/response" + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/pkg/cache" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "go.uber.org/zap" +) + +type FilterNavi struct { + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + appFragmentAccessLayer repositoryAccessLayer.AppFragmentsAccessLayer + cacheClient cache.ConfigClientInterface +} + +func NewFilterNavi(repositories *repositoryAccessLayer.RepositoryAccessLayer) *FilterNavi { + return &FilterNavi{ + eventsAccessLayer: repositories.EventsAccessLayer, + appFragmentAccessLayer: repositories.AppFragmentsAccessLayer, + cacheClient: cache.NewCacheConfig(), + } +} + +func (fn *FilterNavi) CreateFilterResponse(eventIngestionIndex string) []response.FilterResponseData { + return []response.FilterResponseData{ + { + FilterName: "Labels", + FilterKey: "labels", + FilterData: fn.createLabelTypeData(), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "App Name", + FilterKey: 
"app_version_name", + FilterData: fn.getFilterValues("base_attributes.app_version_name", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Screen Name", + FilterKey: "screen_name", + FilterData: fn.getFilterValues("events.screen_name", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Vertical", + FilterKey: "vertical", + FilterData: fn.getFilterValues("events.module_name", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "App Version Code", + FilterKey: "app_version_code", + FilterData: fn.getFilterValues("base_attributes.app_version_code", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Screen Tag", + FilterKey: "screen_tag", + FilterData: fn.getScreenTags(), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Select Dates", + FilterKey: "date_range_picker", + SelectionConfig: response.RANGE_PICKER, + }, + } +} + +func (fn *FilterNavi) getFilterValues(key string, eventIngestionIndex string) []response.FilterData { + values := fn.getUniqueValue(key, eventIngestionIndex) + return helper.MapToFilterData(values) +} + +func (fn *FilterNavi) getUniqueValue(key string, eventIngestionIndex string) (values []string) { + valueInCache, found := fn.cacheClient.Get(key + utils.NAVI_USER_APP) + if found { + //cache hit + values = valueInCache.([]string) + } else { + valuesFromDb := []string{} + err := error(nil) + if key == utils.EVENTS_SCREEN_NAME { + minAppVersion := config.GetCoreConfig().MinAppVersionForScreenNameFilter + valuesFromDb, err = fn.eventsAccessLayer.FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion) + } else { + valuesFromDb, err = fn.eventsAccessLayer.FetchUniqueKeys(key, eventIngestionIndex) + } + //cache miss + + if err != nil { + log.Error("error while fetching unique keys for navi app", zap.Error(err)) + return nil + } + 
fn.cacheClient.PutWithDefaultTtl(key+utils.NAVI_USER_APP, valuesFromDb)
+		values = valuesFromDb
+	}
+	return values
+}
+
+func (fn *FilterNavi) getFilterValuesForFragments(key string, fragmentIngestionIndex string) []response.FilterData {
+	valueInCache, found := fn.cacheClient.Get(key + utils.NAVI_USER_APP)
+	var values []string
+	if found {
+		//cache hit
+		values = valueInCache.([]string)
+	} else {
+		//cache miss
+		valuesFromDb, err := fn.appFragmentAccessLayer.FetchUniqueFragments(key, fragmentIngestionIndex)
+		if err != nil {
+			return nil
+		}
+		fn.cacheClient.PutWithDefaultTtl(key+utils.NAVI_USER_APP, valuesFromDb)
+		values = valuesFromDb
+	}
+	// values is already resolved above; the removed re-lookup routed a fragment index through the events access layer and only worked by hitting the cache entry written two lines up
+	return helper.MapToFilterData(values)
+}
+
+func (fn *FilterNavi) createLabelTypeData() []response.FilterData {
+	return []response.FilterData{
+		{
+			Label: "ANR",
+			Value: "ANR_EVENT",
+		},
+		{
+			Label: "CRASH",
+			Value: "CRASH_ANALYTICS_EVENT",
+		},
+		{
+			Label: "SWW",
+			Value: "ERROR_LOG",
+		},
+	}
+}
+
+func (fn *FilterNavi) getScreenTags() []response.FilterData {
+	configMap := config.GetCoreConfig().ScreenTagFilters
+	var result []response.FilterData
+	for k := range configMap { // only keys are needed; `for k, _ :=` is flagged by gofmt/vet
+		localKey := k
+		data := response.FilterData{
+			Label: localKey,
+			Value: localKey,
+		}
+		result = append(result, data)
+	}
+	return result
+}
diff --git a/alfred/cmd/core/app/service/filter_navi_apple_os.go b/alfred/cmd/core/app/service/filter_navi_apple_os.go
new file mode 100644
index 0000000..f0e7da5
--- /dev/null
+++ b/alfred/cmd/core/app/service/filter_navi_apple_os.go
@@ -0,0 +1,63 @@
+package service
+
+import (
+	"alfred/api/response"
+	"alfred/cmd/core/app/helper"
+	"alfred/pkg/cache"
+	"alfred/pkg/log"
+	"alfred/repositoryAccessLayer"
+	"alfred/utils"
+	"go.uber.org/zap"
+)
+
+type FilterNaviAppIos struct {
+	sessionAccessLayer repositoryAccessLayer.SessionsAccessLayer
+	cacheClient        cache.ConfigClientInterface
+}
+
+func NewFilterNaviAppIos(repositories 
*repositoryAccessLayer.RepositoryAccessLayer) *FilterNaviAppIos { + return &FilterNaviAppIos{ + sessionAccessLayer: repositories.SessionsAccessLayer, + cacheClient: cache.NewCacheConfig(), + } +} + +func (fn *FilterNaviAppIos) CreateFilterResponse(eventIngestionIndex string) []response.FilterResponseData { + return []response.FilterResponseData{ + { + FilterName: "App Version Code", + FilterKey: "app_version_code", + FilterData: fn.getFilterValuesFromSession("base_attributes.app_version_code", eventIngestionIndex), + SelectionConfig: response.MULTI_SELECT, + }, + { + FilterName: "Select Dates", + FilterKey: "date_range_picker", + SelectionConfig: response.RANGE_PICKER, + }, + } +} + +func (fn *FilterNaviAppIos) getFilterValuesFromSession(key string, sessionUploadIndex string) []response.FilterData { + values := fn.getUniqueValueFromSession(key, sessionUploadIndex) + return helper.MapToFilterData(values) +} +func (fn *FilterNaviAppIos) getUniqueValueFromSession(key string, sessionUploadIndex string) (values []string) { + cacheKey := key + utils.NAVI_USER_APP_IOS + valueInCache, found := fn.cacheClient.Get(cacheKey) + if found { + values = valueInCache.([]string) + } else { + valuesFromDb := []string{} + err := error(nil) + valuesFromDb, err = fn.sessionAccessLayer.FetchUniqueKeys(key, sessionUploadIndex) + if err != nil { + log.Error("error while fetching unique keys for ios", zap.Error(err)) + return nil + } + fn.cacheClient.PutWithDefaultTtl(cacheKey, valuesFromDb) + values = valuesFromDb + } + return values + +} diff --git a/alfred/cmd/core/app/service/filter_web.go b/alfred/cmd/core/app/service/filter_web.go new file mode 100644 index 0000000..8220e31 --- /dev/null +++ b/alfred/cmd/core/app/service/filter_web.go @@ -0,0 +1,24 @@ +package service + +import "alfred/api/response" + +type FilterWeb interface { + CreateFilterResponse() []response.FilterResponseData +} + +type FilterWebImpl struct { +} + +func NewFilterWeb() FilterWeb { + return &FilterWebImpl{} +} + 
+func (fw *FilterWebImpl) CreateFilterResponse() []response.FilterResponseData { + return []response.FilterResponseData{ + { + FilterName: "Select Dates", + FilterKey: "date_range_picker", + SelectionConfig: response.RANGE_PICKER, + }, + } +} diff --git a/alfred/cmd/core/app/service/interfaces/app_client.go b/alfred/cmd/core/app/service/interfaces/app_client.go new file mode 100644 index 0000000..0a796b9 --- /dev/null +++ b/alfred/cmd/core/app/service/interfaces/app_client.go @@ -0,0 +1,13 @@ +package interfaces + +import ( + "alfred/api/request" + "alfred/api/response" + "alfred/model/es" +) + +type AppClient interface { + FetchSessionDetails(filters request.SessionFilters, page *es.Page, + sessionUploadIndex string, eventIngestionIndex string) ([]es.SessionResponse, error) + CreateBucketsForSessionForResponse(sessions []es.SessionResponse) (sessionResponseDataList []response.SearchSessionResponseData, err error) +} diff --git a/alfred/cmd/core/app/service/masking_service.go b/alfred/cmd/core/app/service/masking_service.go new file mode 100644 index 0000000..3862071 --- /dev/null +++ b/alfred/cmd/core/app/service/masking_service.go @@ -0,0 +1,85 @@ +package service + +import ( + "alfred/cmd/core/app/external" + "alfred/cmd/core/app/helper" + "alfred/cmd/core/app/masking" + "alfred/config" + "alfred/internal/clients" + "alfred/model/es" + "alfred/pkg/s3" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "fmt" + "sync" +) + +type MaskingService interface { + MaskImages(pathToUnzippedFiles string, eventsFromSession []es.EventResponse, zipName, clientName, imageType string, isAsyncDSMaskingEnabled bool) (string, bool, error) +} + +type MaskingServiceImpl struct { + sessionsAccessLayer repositoryAccessLayer.SessionsAccessLayer + eventsAccessLayer repositoryAccessLayer.EventsAccessLayer + videoGenerationStatusAccessLayer repositoryAccessLayer.VideoGenerationStatusAccessLayer + s3Client s3.S3Client + maskingStrategy masking.Masking +} + +func 
NewMaskingServiceImpl(repositories *repositoryAccessLayer.RepositoryAccessLayer, s3Client s3.S3Client, httpClient *clients.HttpClient) *MaskingServiceImpl {
+	return &MaskingServiceImpl{
+		sessionsAccessLayer:              repositories.SessionsAccessLayer,
+		eventsAccessLayer:                repositories.EventsAccessLayer,
+		s3Client:                         s3Client,
+		videoGenerationStatusAccessLayer: repositories.VideoGenerationStatusAccessLayer,
+		maskingStrategy:                  masking.Masking{DataScienceService: external.NewDataScienceService(httpClient, s3Client)},
+	}
+}
+
+func (m *MaskingServiceImpl) MaskImages(pathToUnzippedFiles string, eventsFromSession []es.EventResponse, zipName, clientName, imageType string, isAsyncDSMaskingEnabled bool) (string, bool, error) { // masks screenshots per configured screen strategy, one worker goroutine per screen; returns (dir, anyMasked, err)
+
+	screens := helper.GetKeysOfMap(config.GetCoreConfig().MaskingConfig.MaskedScreensBlurScreenRatioMap)
+	screenshotPathsToBeMasked, screenshotNamesToBeMasked := helper.GetScreenshotsToBeMaskedByScreens(eventsFromSession, pathToUnzippedFiles, imageType, screens)
+
+	var wg sync.WaitGroup
+	var errorsList []error; var mu sync.Mutex // mu guards errorsList and boolMasked, which every worker writes
+
+	boolMasked := false
+
+	for screen, screenshotPathList := range screenshotPathsToBeMasked {
+		maskingMode := config.GetCoreConfig().MaskingConfig.MaskedScreensStrategyMap[screen]
+		if maskingMode == utils.BLUR_MODE && isAsyncDSMaskingEnabled {
+			continue
+		}
+		m.maskingStrategy.SetStrategy(screen, m.s3Client, clientName, maskingMode, isAsyncDSMaskingEnabled) // NOTE(review): m.maskingStrategy is shared — setting it per-iteration races with goroutines still running a previous screen's strategy; confirm SetStrategy/Execute are concurrency-safe
+		wg.Add(1)
+		go func(screen string, screenshotPathList, screenshotNameList []string) {
+			defer wg.Done()
+			isMasked, err := m.maskingStrategy.ExecuteMaskingscreenshots(screenshotPathList, screenshotNameList, pathToUnzippedFiles, screen, zipName, imageType)
+			if !isAsyncDSMaskingEnabled && maskingMode == utils.DS_MODE && err != nil {
+				m.maskingStrategy.SetStrategy(screen, m.s3Client, clientName, utils.BLUR_MODE, isAsyncDSMaskingEnabled) // DS masking failed: fall back to blur once
+				isMasked, err = m.maskingStrategy.ExecuteMaskingscreenshots(screenshotPathList, screenshotNameList, pathToUnzippedFiles, screen, zipName, imageType)
+			}
+			mu.Lock()
+			// errorsList and boolMasked are written by every worker; without mu
+			// these writes are a data race (visible under `go test -race`).
+			if err != nil { errorsList = append(errorsList, err) }
+			if isMasked { boolMasked = true }
+			mu.Unlock()
+		}(screen, screenshotPathList, screenshotNamesToBeMasked[screen])
+
+	}
+
+	wg.Wait()
+
+	if len(errorsList) > 0 {
+		errMsg := "Errors occurred during processing:\n"
+		for _, err := range errorsList {
+			errMsg += fmt.Sprintf("- %v\n", err)
+		}
+		return pathToUnzippedFiles, false, errors.New(errMsg)
+	}
+
+	return pathToUnzippedFiles, boolMasked, nil
+}
diff --git a/alfred/cmd/core/main.go b/alfred/cmd/core/main.go
new file mode 100644
index 0000000..7262ba7
--- /dev/null
+++ b/alfred/cmd/core/main.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"alfred/cmd/core/app"
+	"alfred/config"
+	"alfred/internal/clients"
+	"alfred/pkg/log"
+	"alfred/pkg/s3"
+	"alfred/repository"
+	"alfred/repositoryAccessLayer"
+	"os"
+	"time"
+
+	ginzap "github.com/gin-contrib/zap"
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/cobra"
+	_ "go.uber.org/automaxprocs"
+	"go.uber.org/zap"
+)
+
+func main() {
+	log.InitLogger("alfred-core")
+	config.LoadCoreConfig()
+
+	command := &cobra.Command{
+		Use:   "alfred-core",
+		Short: "alfred core serves cruise control, internal requests",
+		Long:  "alfred core serves cruise control, internal requests",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			r := gin.New()
+
+			r.Use(ginzap.Ginzap(log.GetLogger(), time.RFC3339, true))
+
+			r.Use(ginzap.RecoveryWithZap(log.GetLogger(), true))
+
+			s3Client := s3.NewS3Client()
+			httpClient := clients.NewHttpClient(config.GetCoreConfig().HttpConfig)
+			mjolnirClient := clients.NewMjolnirClient(httpClient.HttpClient,
+				config.GetCoreConfig().OutboundServiceConfig.MjolnirServiceUrl, config.GetCoreConfig().OutboundServiceConfig.MjolnirRealmId,
+			)
+
+			ingestorClient := clients.NewAlfredIngestorClient(httpClient.HttpClient)
+			esConfig := config.GetCoreConfig().ElasticSearchConfig.BaseConfig
+			repositories := repository.InitRepositories(esConfig)
+			repositoryAccessLayer := 
repositoryAccessLayer.InitRepositoryAccessLayer(repositories) + + sv := app.NewServer(r, repositoryAccessLayer, s3Client, httpClient, mjolnirClient, ingestorClient) + + sv.Handler() + sv.Start() + + return nil + }, + } + + if err := command.Execute(); err != nil { + log.Error("alfred core command execution failed", zap.Error(err)) + os.Exit(1) + } +} diff --git a/alfred/cmd/ferret/app/dependency/dependencies.go b/alfred/cmd/ferret/app/dependency/dependencies.go new file mode 100644 index 0000000..5609f42 --- /dev/null +++ b/alfred/cmd/ferret/app/dependency/dependencies.go @@ -0,0 +1,20 @@ +package dependency + +import ( + "alfred/config" + kafka "alfred/pkg/kafka/produce" +) + +type FerretDependencies struct { + Handlers *FerretHandlerDep +} + +func InitFerretDependencies() *FerretDependencies { + kafkaProducer := kafka.NewKProducer(config.GetFerretConfig().BaseConfig.Env, config.GetFerretConfig().KafkaConfig.BaseConfig) + validators := InitFerretValidatorDependency() + services := InitFerretServicesDependency(kafkaProducer, validators) + handlers := InitFerretHandlerDependency(services, validators) + return &FerretDependencies{ + Handlers: handlers, + } +} diff --git a/alfred/cmd/ferret/app/dependency/handlers.go b/alfred/cmd/ferret/app/dependency/handlers.go new file mode 100644 index 0000000..6a81482 --- /dev/null +++ b/alfred/cmd/ferret/app/dependency/handlers.go @@ -0,0 +1,13 @@ +package dependency + +import "alfred/cmd/ferret/app/handlers" + +type FerretHandlerDep struct { + AlfredErrorEventsHandler handlers.AlfredErrorEventsHandler +} + +func InitFerretHandlerDependency(service *FerretServicesDep, validators *FerretValidatorDep) *FerretHandlerDep { + return &FerretHandlerDep{ + AlfredErrorEventsHandler: handlers.NewAlfredErrorEventsHandler(service.alfredErrorEventsService, validators.headersValidator), + } +} diff --git a/alfred/cmd/ferret/app/dependency/services.go b/alfred/cmd/ferret/app/dependency/services.go new file mode 100644 index 0000000..d5bfd30 
--- /dev/null +++ b/alfred/cmd/ferret/app/dependency/services.go @@ -0,0 +1,17 @@ +package dependency + +import ( + "alfred/cmd/ferret/app/service" + "alfred/cmd/ferret/app/service/interfaces" + kafka "alfred/pkg/kafka/produce" +) + +type FerretServicesDep struct { + alfredErrorEventsService interfaces.AlfredErrorEventsService +} + +func InitFerretServicesDependency(kafkaProducer kafka.KProducer, validators *FerretValidatorDep) *FerretServicesDep { + return &FerretServicesDep{ + alfredErrorEventsService: service.NewAlfredErrorEventsService(kafkaProducer, validators.bodyValidator), + } +} diff --git a/alfred/cmd/ferret/app/dependency/validator.go b/alfred/cmd/ferret/app/dependency/validator.go new file mode 100644 index 0000000..e560236 --- /dev/null +++ b/alfred/cmd/ferret/app/dependency/validator.go @@ -0,0 +1,15 @@ +package dependency + +import "alfred/cmd/ferret/app/validator" + +type FerretValidatorDep struct { + headersValidator validator.HeadersValidator + bodyValidator validator.BodyValidator +} + +func InitFerretValidatorDependency() *FerretValidatorDep { + return &FerretValidatorDep{ + headersValidator: validator.NewHeadersValidator(), + bodyValidator: validator.NewBodyValidator(), + } +} diff --git a/alfred/cmd/ferret/app/handlers/alfred_error_events_handler.go b/alfred/cmd/ferret/app/handlers/alfred_error_events_handler.go new file mode 100644 index 0000000..ac23037 --- /dev/null +++ b/alfred/cmd/ferret/app/handlers/alfred_error_events_handler.go @@ -0,0 +1,36 @@ +package handlers + +import ( + "alfred/cmd/ferret/app/service/interfaces" + "alfred/cmd/ferret/app/validator" + "alfred/pkg/log" + "alfred/utils" + "github.com/gin-gonic/gin" + "net/http" +) + +type AlfredErrorEventsHandler interface { + IngestErrorEvents(c *gin.Context) +} + +type AlfredErrorEventsHandlerImpl struct { + alfredErrorEventsService interfaces.AlfredErrorEventsService + headersValidator validator.HeadersValidator +} + +func NewAlfredErrorEventsHandler(alfredErrorEventsService 
interfaces.AlfredErrorEventsService, headersValidator validator.HeadersValidator) *AlfredErrorEventsHandlerImpl { + return &AlfredErrorEventsHandlerImpl{ + alfredErrorEventsService: alfredErrorEventsService, + headersValidator: headersValidator, + } +} + +func (aeeh *AlfredErrorEventsHandlerImpl) IngestErrorEvents(c *gin.Context) { + client, err := aeeh.headersValidator.ValidateAPIKeyHeaders(c) + if err != nil { + log.Error("error-events could not be ingested as client was not mapped") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + aeeh.alfredErrorEventsService.IngestErrorEvents(c, client) +} diff --git a/alfred/cmd/ferret/app/server.go b/alfred/cmd/ferret/app/server.go new file mode 100644 index 0000000..00a309a --- /dev/null +++ b/alfred/cmd/ferret/app/server.go @@ -0,0 +1,70 @@ +package app + +import ( + "alfred/cmd/ferret/app/dependency" + "alfred/config" + "alfred/internal/metrics" + "alfred/pkg/log" + "alfred/utils" + "fmt" + "github.com/gin-gonic/gin" + "go.elastic.co/apm/module/apmgin/v2" + "go.uber.org/zap" + "net/http" + "strconv" + "time" +) + +type Server struct { + gin *gin.Engine + dependencies dependency.FerretDependencies + metricsMiddleware *gin.RouterGroup +} + +func NewServer(gin *gin.Engine) *Server { + dependencies := dependency.InitFerretDependencies() + return &Server{ + gin: gin, + dependencies: *dependencies, + } +} + +func (s *Server) Start() { + s.Handler() + log.Info("starting alfred ferret server", zap.String("port", strconv.Itoa(config.GetFerretConfig().BaseConfig.Port))) + err := s.gin.Run(fmt.Sprintf(":%v", config.GetFerretConfig().BaseConfig.Port)) + if err != nil { + log.Error("error starting alfred ferret server", zap.Error(err)) + } +} + +func (s *Server) Handler() { + s.gin.Use(apmgin.Middleware(s.gin)) + s.metricsMiddleware = s.createMetricMiddleware() + s.Router() + metrics.AdminHandler(config.GetFerretConfig().BaseConfig.MetricPort) + s.healthCheckHandler() +} + +func (s 
*Server) Router() { + s.metricsMiddleware.POST(utils.ERROR_EVENTS_API, s.dependencies.Handlers.AlfredErrorEventsHandler.IngestErrorEvents) +} + +func (s *Server) createMetricMiddleware() *gin.RouterGroup { + return s.gin.Group(utils.EMPTY, func(c *gin.Context) { + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + metrics.AlfredApiRequestLatencySummary.WithLabelValues(c.Request.URL.Path, c.Request.Method, status).Observe(endTime) + }) +} + +func (s *Server) healthCheckHandler() { + s.metricsMiddleware.GET(utils.PING, func(c *gin.Context) { + c.String(http.StatusOK, utils.PONG) + }) +} diff --git a/alfred/cmd/ferret/app/service/alfred_error_events_service.go b/alfred/cmd/ferret/app/service/alfred_error_events_service.go new file mode 100644 index 0000000..7b03974 --- /dev/null +++ b/alfred/cmd/ferret/app/service/alfred_error_events_service.go @@ -0,0 +1,45 @@ +package service + +import ( + "alfred/cmd/ferret/app/validator" + "alfred/config" + "alfred/model/common" + "alfred/model/ferret" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/utils" + "github.com/gin-gonic/gin" + "net/http" +) + +type AlfredErrorEventsServiceImpl struct { + kafkaProducer kafka.KProducer + bodyValidators validator.BodyValidator +} + +func NewAlfredErrorEventsService(producer kafka.KProducer, bodyValidator validator.BodyValidator) *AlfredErrorEventsServiceImpl { + return &AlfredErrorEventsServiceImpl{ + kafkaProducer: producer, + bodyValidators: bodyValidator, + } +} + +func (aees *AlfredErrorEventsServiceImpl) IngestErrorEvents(c *gin.Context, clientName string) { + var 
requestBody ferret.ErrorEventsAttributes + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + requestBody.ErrorAttribute.ClientName = ferret.ClientName(clientName) + if err := aees.bodyValidators.ValidateErrorEventsRequest(c, requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(&common.InvalidRequestError{}, http.StatusBadRequest, nil)) + return + } + + err := aees.kafkaProducer.SendMessage(requestBody, config.GetFerretConfig().KafkaConfig.ErrorEventsUploadTopic, requestBody.ErrorAttribute.DeviceId, clientName) + if err != nil { + log.Error("Error while publishing alfred error-events to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} diff --git a/alfred/cmd/ferret/app/service/interfaces/alfred_error_events_service.go b/alfred/cmd/ferret/app/service/interfaces/alfred_error_events_service.go new file mode 100644 index 0000000..f0cf288 --- /dev/null +++ b/alfred/cmd/ferret/app/service/interfaces/alfred_error_events_service.go @@ -0,0 +1,7 @@ +package interfaces + +import "github.com/gin-gonic/gin" + +type AlfredErrorEventsService interface { + IngestErrorEvents(c *gin.Context, clientName string) +} diff --git a/alfred/cmd/ferret/app/validator/body_validator.go b/alfred/cmd/ferret/app/validator/body_validator.go new file mode 100644 index 0000000..6781bd9 --- /dev/null +++ b/alfred/cmd/ferret/app/validator/body_validator.go @@ -0,0 +1,32 @@ +package validator + +import ( + "alfred/model/ferret" + "alfred/pkg/log" + "alfred/utils" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "go.uber.org/zap" +) + +type BodyValidator interface { + ValidateErrorEventsRequest(c *gin.Context, requestBody ferret.ErrorEventsAttributes) error +} + +type BodyValidatorImpl struct { +} + +func NewBodyValidator() *BodyValidatorImpl { + return &BodyValidatorImpl{} +} + +func (bv 
BodyValidatorImpl) ValidateErrorEventsRequest(c *gin.Context, requestBody ferret.ErrorEventsAttributes) error { + isPresentRequestBody := !(requestBody.ErrorAttribute.DeviceId == utils.EMPTY) + if !isPresentRequestBody { + log.Error("[INGEST_ERROR_EVENTS] bad request exception", zap.String("body", fmt.Sprintf("%v", requestBody))) + err := errors.New("Field Missing in Request Body") + return err + } + return nil +} diff --git a/alfred/cmd/ferret/app/validator/headers_validator.go b/alfred/cmd/ferret/app/validator/headers_validator.go new file mode 100644 index 0000000..e2a6131 --- /dev/null +++ b/alfred/cmd/ferret/app/validator/headers_validator.go @@ -0,0 +1,34 @@ +package validator + +import ( + "alfred/config" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" +) + +type HeadersValidator interface { + ValidateAPIKeyHeaders(c *gin.Context) (string, error) +} + +type HeadersValidatorImpl struct{} + +func NewHeadersValidator() *HeadersValidatorImpl { + return &HeadersValidatorImpl{} +} + +func (hv *HeadersValidatorImpl) ValidateAPIKeyHeaders(c *gin.Context) (string, error) { + apiKey := c.Request.Header.Get(utils.X_API_KEY) + apiKeyClientMap := config.GetFerretConfig().ApiKeyConfig + + value, found := apiKeyClientMap[apiKey] + if !found { + return utils.EMPTY, errors.New(utils.INVALID_CLIENT) + } + clientName, err := utils.GetStringValue(value) + if err != nil { + return utils.EMPTY, errors.New("client name could not be parsed") + } + + return clientName, nil +} diff --git a/alfred/cmd/ferret/main.go b/alfred/cmd/ferret/main.go new file mode 100644 index 0000000..5b3abf1 --- /dev/null +++ b/alfred/cmd/ferret/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "alfred/cmd/ferret/app" + "alfred/config" + "alfred/pkg/log" + ginzap "github.com/gin-contrib/zap" + "github.com/gin-gonic/gin" + "github.com/spf13/cobra" + "go.uber.org/zap" + "os" + "time" +) + +func main() { + log.InitLogger("alfred-ferret") + config.LoadFerretConfig() + + command := &cobra.Command{ + 
Use: "alfred-ferret", + Short: "alfred ferret receive all error events and ingest them into kafka", + Long: "alfred ferret receive all error events and ingest them into kafka", + RunE: func(cmd *cobra.Command, args []string) error { + r := gin.New() + r.Use(ginzap.RecoveryWithZap(log.GetLogger(), true)) + r.Use(ginzap.Ginzap(log.GetLogger(), time.RFC3339, true)) + sv := app.NewServer(r) + sv.Start() + + return nil + }, + } + if err := command.Execute(); err != nil { + log.Error("alfred ferret main command execution failed", zap.Error(err)) + os.Exit(1) + } +} diff --git a/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter.go b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter.go new file mode 100644 index 0000000..38578af --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter.go @@ -0,0 +1,67 @@ +package cosmos_adapter + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "alfred/model/ingester" +) + +type CosmosAppEventAdapter interface { + AdaptAppEventRequestCosmos() (ingester.AppEvent, error) +} + +type CosmosAppEventAdapterImpl struct { + CosmosAppEvent cosmos.AppEvent +} + +func (s CosmosAppEventAdapterImpl) AdaptAppEventRequestCosmos() (ingester.AppEvent, error) { + + mappedEvents := make([]ingester.EventAttributes, len(s.CosmosAppEvent.Events)) + for i, cosmosEvent := range s.CosmosAppEvent.Events { + mappedEvent := ingester.EventAttributes{ + EventId: cosmosEvent.EventId, + ParentSessionId: cosmosEvent.ParentSessionId, + SessionId: cosmosEvent.SessionId, + ScreenName: cosmosEvent.ScreenName, + ScreenshotTime: cosmosEvent.ScreenshotTime, + ModuleName: cosmosEvent.ModuleName, + EventName: cosmosEvent.EventName, + EventTimestamp: cosmosEvent.EventTimestamp, + Attributes: cosmosEvent.Attributes, + EventType: cosmosEvent.EventType, + ZipName: cosmosEvent.ZipName, + } + mappedEvents[i] = mappedEvent + } + + mappedRequest := ingester.AppEvent{ + BaseAttributes: ingester.BaseAttributes{ + 
AppVersionCode: s.CosmosAppEvent.BaseAttributes.AppVersionCode, + AppVersionName: s.CosmosAppEvent.BaseAttributes.AppVersionName, + ClientTs: s.CosmosAppEvent.BaseAttributes.ClientTs, + DeviceId: s.CosmosAppEvent.BaseAttributes.DeviceId, + DeviceModel: s.CosmosAppEvent.BaseAttributes.DeviceModel, + DeviceManufacturer: s.CosmosAppEvent.BaseAttributes.DeviceManufacturer, + ScreenResolution: s.CosmosAppEvent.BaseAttributes.ScreenResolution, + AppOS: s.CosmosAppEvent.BaseAttributes.AppOS, + OsVersion: s.CosmosAppEvent.BaseAttributes.OsVersion, + Latitude: s.CosmosAppEvent.BaseAttributes.Latitude, + Longitude: s.CosmosAppEvent.BaseAttributes.Longitude, + NetworkType: s.CosmosAppEvent.BaseAttributes.NetworkType, + CustomerId: s.CosmosAppEvent.BaseAttributes.AgentId, + UpTime: s.CosmosAppEvent.BaseAttributes.UpTime, + CarrierName: s.CosmosAppEvent.BaseAttributes.CarrierName, + Metadata: s.CosmosAppEvent.BaseAttributes.Metadata, + SessionTimeStamp: s.CosmosAppEvent.BaseAttributes.SessionTimeStamp, + EventTimestamp: s.CosmosAppEvent.BaseAttributes.EventTimestamp, + SessionId: s.CosmosAppEvent.BaseAttributes.SessionId, + ParentSessionId: s.CosmosAppEvent.BaseAttributes.ParentSessionId, + TraceId: s.CosmosAppEvent.BaseAttributes.TraceId, + PhoneNumber: s.CosmosAppEvent.BaseAttributes.PhoneNumber, + SnapshotPerSecond: s.CosmosAppEvent.BaseAttributes.SnapshotPerSecond, + ImageType: s.CosmosAppEvent.BaseAttributes.ImageType, + FileTypeExtension: s.CosmosAppEvent.BaseAttributes.FileTypeExtension, + }, + Events: mappedEvents, + } + return mappedRequest, nil +} diff --git a/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter_test.go b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter_test.go new file mode 100644 index 0000000..939b8d4 --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosAppEventAdapter_test.go @@ -0,0 +1,47 @@ +package cosmos_adapter + +import ( + "alfred/cmd/ingester/app/model/cosmos" + 
"github.com/stretchr/testify/assert" + "testing" +) + +func TestAdaptAppEventRequestCosmos(t *testing.T) { + // Create a sample NaviAppEvent to use in the test + cosmosEvent := cosmos.AppEvent{ + BaseAttributes: cosmos.BaseAttributes{ + AppVersionCode: "1.0", + AppVersionName: "SampleApp", + ClientTs: 123456789, + // Initialize other fields as needed + }, + Events: []cosmos.EventAttributes{ + cosmos.EventAttributes{ + EventId: "event1", + ParentSessionId: "parent1", + SessionId: "session1", + ScreenName: "screen1", + ModuleName: "module1", + EventName: "event1", + EventTimestamp: 123456789, + Attributes: map[string]interface{}{}, + EventType: "type1", + }, + // Add more sample events as needed + }, + } + + // Create a CosmosAppEventAdapterImpl with the sample data + adapter := CosmosAppEventAdapterImpl{CosmosAppEvent: cosmosEvent} + + // Call the function to be tested + result, err := adapter.AdaptAppEventRequestCosmos() + + // Check for no errors + assert.NoError(t, err, "AdaptAppEventRequest() should not return an error") + + // Check the correctness of the result + assert.Equal(t, cosmosEvent.BaseAttributes.AppVersionCode, result.BaseAttributes.AppVersionCode) + assert.Equal(t, len(cosmosEvent.Events), len(result.Events), "Length of mappedEvents does not match the original Events") + +} diff --git a/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter.go b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter.go new file mode 100644 index 0000000..9d3da75 --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter.go @@ -0,0 +1,64 @@ +package cosmos_adapter + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "alfred/model/ingester" +) + +type CosmosSessionUploadRequestAdapter interface { + AdaptSessionUploadRequestCosmos() (ingester.SessionUploadRequest, error) +} + +type CosmosSessionUploadRequestAdapterImpl struct { + CosmosSessionUploadRequest 
cosmos.SessionUploadRequest +} + +func (s CosmosSessionUploadRequestAdapterImpl) AdaptSessionUploadRequestCosmos() (ingester.SessionUploadRequest, error) { + mappedRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + AppVersionCode: s.CosmosSessionUploadRequest.BaseAttributes.AppVersionCode, + AppVersionName: s.CosmosSessionUploadRequest.BaseAttributes.AppVersionName, + ClientTs: s.CosmosSessionUploadRequest.BaseAttributes.ClientTs, + DeviceId: s.CosmosSessionUploadRequest.BaseAttributes.DeviceId, + DeviceModel: s.CosmosSessionUploadRequest.BaseAttributes.DeviceModel, + DeviceManufacturer: s.CosmosSessionUploadRequest.BaseAttributes.DeviceManufacturer, + ScreenResolution: s.CosmosSessionUploadRequest.BaseAttributes.ScreenResolution, + AppOS: s.CosmosSessionUploadRequest.BaseAttributes.AppOS, + OsVersion: s.CosmosSessionUploadRequest.BaseAttributes.OsVersion, + Latitude: s.CosmosSessionUploadRequest.BaseAttributes.Latitude, + Longitude: s.CosmosSessionUploadRequest.BaseAttributes.Longitude, + NetworkType: s.CosmosSessionUploadRequest.BaseAttributes.NetworkType, + CustomerId: s.CosmosSessionUploadRequest.BaseAttributes.AgentId, + UpTime: s.CosmosSessionUploadRequest.BaseAttributes.UpTime, + CarrierName: s.CosmosSessionUploadRequest.BaseAttributes.CarrierName, + Metadata: s.CosmosSessionUploadRequest.BaseAttributes.Metadata, + SessionTimeStamp: s.CosmosSessionUploadRequest.BaseAttributes.SessionTimeStamp, + EventTimestamp: s.CosmosSessionUploadRequest.BaseAttributes.EventTimestamp, + SessionId: s.CosmosSessionUploadRequest.BaseAttributes.SessionId, + ParentSessionId: s.CosmosSessionUploadRequest.BaseAttributes.ParentSessionId, + TraceId: s.CosmosSessionUploadRequest.BaseAttributes.TraceId, + EventEndTimeStamp: s.CosmosSessionUploadRequest.BaseAttributes.EventEndTimeStamp, + PhoneNumber: s.CosmosSessionUploadRequest.BaseAttributes.PhoneNumber, + HasErrors: s.CosmosSessionUploadRequest.BaseAttributes.HasErrors, + SnapshotPerSecond: 
s.CosmosSessionUploadRequest.BaseAttributes.SnapshotPerSecond, + ImageType: s.CosmosSessionUploadRequest.BaseAttributes.ImageType, + FileTypeExtension: s.CosmosSessionUploadRequest.BaseAttributes.FileTypeExtension, + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + BeginningDeviceAttributes: ingester.DeviceAttributes{ + Battery: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery, + Cpu: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Cpu, + Memory: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Memory, + Storage: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Storage, + }, + EndDeviceAttributes: ingester.DeviceAttributes{ + Battery: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Battery, + Cpu: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Cpu, + Memory: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Memory, + Storage: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Storage, + }, + EventId: s.CosmosSessionUploadRequest.SessionUploadEventAttributes.EventId, + }, + } + return mappedRequest, nil +} diff --git a/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter_test.go b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter_test.go new file mode 100644 index 0000000..33251ca --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/cosmos_adapter/CosmosSessionUploadRequestAdapter_test.go @@ -0,0 +1,49 @@ +package cosmos_adapter + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAdaptSessionUploadRequestCosmos(t *testing.T) { + // Create a sample NaviSessionUploadRequest to use in the test + cosmosSessionUploadRequest := cosmos.SessionUploadRequest{ + 
BaseAttributes: cosmos.BaseAttributes{ + AppVersionCode: "1.0", + AppVersionName: "SampleApp", + ClientTs: 123456789, + // Initialize other fields as needed + }, + SessionUploadEventAttributes: cosmos.SessionUploadEventAttributes{ + BeginningDeviceAttributes: cosmos.DeviceAttributes{ + Battery: 50.0, + Cpu: 0.8, + Memory: 2048, + Storage: 65536, + }, + EndDeviceAttributes: cosmos.DeviceAttributes{ + Battery: 30.0, + Cpu: 0.9, + Memory: 3072, + Storage: 81920, + }, + EventId: "event1", + }, + } + + // Create a CosmosSessionUploadRequestAdapterImpl with the sample data + adapter := CosmosSessionUploadRequestAdapterImpl{CosmosSessionUploadRequest: cosmosSessionUploadRequest} + + // Call the function to be tested + result, err := adapter.AdaptSessionUploadRequestCosmos() + + // Use assertions to check for correctness + assert.NoError(t, err, "AdaptSessionUploadRequest() should not return an error") + + // Check the correctness of the result + assert.Equal(t, cosmosSessionUploadRequest.BaseAttributes.AppVersionCode, result.BaseAttributes.AppVersionCode) + assert.Equal(t, cosmosSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery, result.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery) + assert.Equal(t, cosmosSessionUploadRequest.SessionUploadEventAttributes.EventId, result.SessionUploadEventAttributes.EventId) + +} diff --git a/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter.go b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter.go new file mode 100644 index 0000000..cf501ed --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter.go @@ -0,0 +1,66 @@ +package navi_adapter + +import ( + "alfred/cmd/ingester/app/model/navi" + "alfred/model/ingester" +) + +type NaviAppEventAdapter interface { + AdaptAppEventRequestNavi() (ingester.AppEvent, error) +} + +type NaviAppEventAdapterImpl struct { + NaviAppEvent navi.AppEvent +} + +func (n NaviAppEventAdapterImpl) 
AdaptAppEventRequestNavi() (ingester.AppEvent, error) { + + mappedEvents := make([]ingester.EventAttributes, len(n.NaviAppEvent.Events)) + for i, naviEvent := range n.NaviAppEvent.Events { + mappedEvent := ingester.EventAttributes{ + EventId: naviEvent.EventId, + ParentSessionId: naviEvent.ParentSessionId, + SessionId: naviEvent.SessionId, + ScreenName: naviEvent.ScreenName, + ScreenshotTime: naviEvent.ScreenshotTime, + FragmentList: naviEvent.FragmentList, + ModuleName: naviEvent.ModuleName, + EventName: naviEvent.EventName, + EventTimestamp: naviEvent.EventTimestamp, + Attributes: naviEvent.Attributes, + EventType: naviEvent.EventType, + ZipName: naviEvent.ZipName, + } + mappedEvents[i] = mappedEvent + } + + mappedRequest := ingester.AppEvent{ + BaseAttributes: ingester.BaseAttributes{ + AppVersionCode: n.NaviAppEvent.BaseAttributes.AppVersionCode, + AppVersionName: n.NaviAppEvent.BaseAttributes.AppVersionName, + ClientTs: n.NaviAppEvent.BaseAttributes.ClientTs, + DeviceId: n.NaviAppEvent.BaseAttributes.DeviceId, + DeviceModel: n.NaviAppEvent.BaseAttributes.DeviceModel, + DeviceManufacturer: n.NaviAppEvent.BaseAttributes.DeviceManufacturer, + ScreenResolution: n.NaviAppEvent.BaseAttributes.ScreenResolution, + AppOS: n.NaviAppEvent.BaseAttributes.AppOS, + CustomerId: n.NaviAppEvent.BaseAttributes.CustomerId, + OsVersion: n.NaviAppEvent.BaseAttributes.OsVersion, + Latitude: n.NaviAppEvent.BaseAttributes.Latitude, + Longitude: n.NaviAppEvent.BaseAttributes.Longitude, + NetworkType: n.NaviAppEvent.BaseAttributes.NetworkType, + UpTime: n.NaviAppEvent.BaseAttributes.UpTime, + CarrierName: n.NaviAppEvent.BaseAttributes.CarrierName, + SessionTimeStamp: n.NaviAppEvent.BaseAttributes.SessionTimeStamp, + EventTimestamp: n.NaviAppEvent.BaseAttributes.EventTimestamp, + SessionId: n.NaviAppEvent.BaseAttributes.SessionId, + ParentSessionId: n.NaviAppEvent.BaseAttributes.ParentSessionId, + TraceId: n.NaviAppEvent.BaseAttributes.TraceId, + SnapshotPerSecond: 
n.NaviAppEvent.BaseAttributes.SnapshotPerSecond, + PhoneNumber: n.NaviAppEvent.BaseAttributes.PhoneNumber, + ImageType: n.NaviAppEvent.BaseAttributes.ImageType, + }, + Events: mappedEvents, + } + return mappedRequest, nil +} diff --git a/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter_test.go b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter_test.go new file mode 100644 index 0000000..6456cd3 --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviAppEventAdapter_test.go @@ -0,0 +1,39 @@ +package navi_adapter + +import ( + "alfred/cmd/ingester/app/model/navi" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAdaptAppEventRequestNavi(t *testing.T) { + naviEvent := navi.AppEvent{ + BaseAttributes: navi.BaseAttributes{ + AppVersionCode: "1.0", + AppVersionName: "SampleApp", + ClientTs: 123456789, + }, + Events: []navi.EventAttributes{ + navi.EventAttributes{ + EventId: "event1", + }, + }, + } + + // Create a CosmosAppEventAdapterImpl with the sample data + adapter := NaviAppEventAdapterImpl{NaviAppEvent: naviEvent} + + // Call the function to be tested + result, err := adapter.AdaptAppEventRequestNavi() + + // Check for no errors + assert.NoError(t, err, "AdaptAppEventRequest() should not return an error") + + // Check the correctness of the result + assert.Equal(t, naviEvent.BaseAttributes.AppVersionCode, result.BaseAttributes.AppVersionCode) + assert.Equal(t, naviEvent.BaseAttributes.AppVersionName, result.BaseAttributes.AppVersionName) + assert.Equal(t, naviEvent.BaseAttributes.ClientTs, result.BaseAttributes.ClientTs) + assert.Equal(t, naviEvent.Events[0].EventId, result.Events[0].EventId) + assert.Equal(t, len(naviEvent.Events), len(result.Events), "Length of mappedEvents does not match the original Events") + +} diff --git a/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter.go b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter.go new file mode 
100644 index 0000000..5cb96ef --- /dev/null +++ b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter.go @@ -0,0 +1,64 @@ +package navi_adapter + +import ( + "alfred/cmd/ingester/app/model/navi" + "alfred/model/ingester" +) + +type NaviSessionUploadRequestAdapter interface { + AdaptSessionUploadRequestNavi() (ingester.SessionUploadRequest, error) +} + +type NaviSessionUploadRequestAdapterImpl struct { + NaviSessionUploadRequest navi.SessionUploadRequest +} + +func (n NaviSessionUploadRequestAdapterImpl) AdaptSessionUploadRequestNavi() (ingester.SessionUploadRequest, error) { + mappedRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + AppVersionCode: n.NaviSessionUploadRequest.BaseAttributes.AppVersionCode, + AppVersionName: n.NaviSessionUploadRequest.BaseAttributes.AppVersionName, + ClientTs: n.NaviSessionUploadRequest.BaseAttributes.ClientTs, + DeviceId: n.NaviSessionUploadRequest.BaseAttributes.DeviceId, + DeviceModel: n.NaviSessionUploadRequest.BaseAttributes.DeviceModel, + DeviceManufacturer: n.NaviSessionUploadRequest.BaseAttributes.DeviceManufacturer, + ScreenResolution: n.NaviSessionUploadRequest.BaseAttributes.ScreenResolution, + AppOS: n.NaviSessionUploadRequest.BaseAttributes.AppOS, + OsVersion: n.NaviSessionUploadRequest.BaseAttributes.OsVersion, + Latitude: n.NaviSessionUploadRequest.BaseAttributes.Latitude, + Longitude: n.NaviSessionUploadRequest.BaseAttributes.Longitude, + NetworkType: n.NaviSessionUploadRequest.BaseAttributes.NetworkType, + UpTime: n.NaviSessionUploadRequest.BaseAttributes.UpTime, + CarrierName: n.NaviSessionUploadRequest.BaseAttributes.CarrierName, + CustomerId: n.NaviSessionUploadRequest.BaseAttributes.CustomerId, + SessionTimeStamp: n.NaviSessionUploadRequest.BaseAttributes.SessionTimeStamp, + EventTimestamp: n.NaviSessionUploadRequest.BaseAttributes.EventTimestamp, + Metadata: n.NaviSessionUploadRequest.BaseAttributes.Metadata, + SessionId: 
n.NaviSessionUploadRequest.BaseAttributes.SessionId, + ParentSessionId: n.NaviSessionUploadRequest.BaseAttributes.ParentSessionId, + TraceId: n.NaviSessionUploadRequest.BaseAttributes.TraceId, + EventEndTimeStamp: n.NaviSessionUploadRequest.BaseAttributes.EventEndTimeStamp, + SnapshotPerSecond: n.NaviSessionUploadRequest.BaseAttributes.SnapshotPerSecond, + PhoneNumber: n.NaviSessionUploadRequest.BaseAttributes.PhoneNumber, + HasErrors: n.NaviSessionUploadRequest.BaseAttributes.HasErrors, + ImageType: n.NaviSessionUploadRequest.BaseAttributes.ImageType, + FileTypeExtension: n.NaviSessionUploadRequest.BaseAttributes.FileTypeExtension, + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + BeginningDeviceAttributes: ingester.DeviceAttributes{ + Battery: n.NaviSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery, + Cpu: n.NaviSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Cpu, + Memory: n.NaviSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Memory, + Storage: n.NaviSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Storage, + }, + EndDeviceAttributes: ingester.DeviceAttributes{ + Battery: n.NaviSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Battery, + Cpu: n.NaviSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Cpu, + Memory: n.NaviSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Memory, + Storage: n.NaviSessionUploadRequest.SessionUploadEventAttributes.EndDeviceAttributes.Storage, + }, + EventId: n.NaviSessionUploadRequest.SessionUploadEventAttributes.EventId, + }, + } + return mappedRequest, nil +} diff --git a/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter_test.go b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter_test.go new file mode 100644 index 0000000..c12d418 --- /dev/null +++ 
b/alfred/cmd/ingester/app/adapter/navi_adapter/NaviSessionUploadRequestAdapter_test.go @@ -0,0 +1,45 @@ +package navi_adapter + +import ( + "alfred/cmd/ingester/app/model/navi" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAdaptSessionUploadRequestNavi(t *testing.T) { + // Create a sample NaviSessionUploadRequest to use in the test + naviSessionUploadRequest := navi.SessionUploadRequest{ + BaseAttributes: navi.BaseAttributes{ + AppVersionCode: "1.0", + AppVersionName: "SampleApp", + ClientTs: 123456789, + // Initialize other fields as needed + }, + SessionUploadEventAttributes: navi.SessionUploadEventAttributes{ + BeginningDeviceAttributes: navi.DeviceAttributes{ + Battery: 50.0, + Cpu: 0.8, + Memory: 2048, + Storage: 65536, + }, + EventId: "event1", + }, + } + + // Create a CosmosSessionUploadRequestAdapterImpl with the sample data + adapter := NaviSessionUploadRequestAdapterImpl{NaviSessionUploadRequest: naviSessionUploadRequest} + + // Call the function to be tested + result, err := adapter.AdaptSessionUploadRequestNavi() + + // Use assertions to check for correctness + assert.NoError(t, err, "AdaptSessionUploadRequest() should not return an error") + + // Check the correctness of the result + assert.Equal(t, naviSessionUploadRequest.BaseAttributes.AppVersionCode, result.BaseAttributes.AppVersionCode) + assert.Equal(t, naviSessionUploadRequest.BaseAttributes.AppVersionName, result.BaseAttributes.AppVersionName) + assert.Equal(t, naviSessionUploadRequest.BaseAttributes.ClientTs, result.BaseAttributes.ClientTs) + assert.Equal(t, naviSessionUploadRequest.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery, result.SessionUploadEventAttributes.BeginningDeviceAttributes.Battery) + assert.Equal(t, naviSessionUploadRequest.SessionUploadEventAttributes.EventId, result.SessionUploadEventAttributes.EventId) + +} diff --git a/alfred/cmd/ingester/app/factory/cosmos_app_factory.go b/alfred/cmd/ingester/app/factory/cosmos_app_factory.go new file 
mode 100644 index 0000000..8d4f0a0 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/cosmos_app_factory.go @@ -0,0 +1,38 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/internal/clients" + "alfred/pkg/cache" + kafka "alfred/pkg/kafka/produce" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type CosmosAppFactory struct { + Producer kafka.KProducer + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient + cacheClient cache.ConfigClientInterface +} + +func (f *CosmosAppFactory) CreateClient(client string) (interfaces.Client, error) { + + if client == utils.COSMOS { + return f.CreateCosmosApp(), nil + } + return nil, errors.New("invalid API key for CosmosApp") +} + +func (f *CosmosAppFactory) Initialize(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient, cacheClient cache.ConfigClientInterface) { + f.Producer = producer + f.Repositories = repositories + f.HttpClient = httpClient + f.cacheClient = cacheClient +} + +func (f *CosmosAppFactory) CreateCosmosApp() *service.CosmosApp { + return service.NewCosmosAppHandler(f.Producer, f.Repositories, f.HttpClient) +} diff --git a/alfred/cmd/ingester/app/factory/cosmos_app_factory_test.go b/alfred/cmd/ingester/app/factory/cosmos_app_factory_test.go new file mode 100644 index 0000000..cfff96d --- /dev/null +++ b/alfred/cmd/ingester/app/factory/cosmos_app_factory_test.go @@ -0,0 +1,40 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCosmosAppFactory_Initialize(t *testing.T) { + log.InitLogger() + cosmos_app_factory := CosmosAppFactory{} + cosmos_app_factory.Initialize(nil) + assert.Equal(t, cosmos_app_factory.Producer, nil) +} + +func TestCosmosAppFactory_CreateWebClient(t *testing.T) { + 
log.InitLogger() + cosmos_app_factory := CosmosAppFactory{Producer: nil} + + t.Run("Invalid client", func(t *testing.T) { + invalidClient := "test" + expectedError := errors.New("invalid API key for CosmosApp") + appClient, err := cosmos_app_factory.CreateClient(invalidClient) + assert.Nil(t, appClient) + assert.NotNil(t, err) + assert.Equal(t, expectedError, err) + }) + + t.Run("Valid client", func(t *testing.T) { + validClient := utils.COSMOS + expectedAppClient := service.NewCosmosAppHandler(nil) + appClient, err := cosmos_app_factory.CreateClient(validClient) + assert.NotNil(t, appClient) + assert.Nil(t, err) + assert.Equal(t, appClient, expectedAppClient) + }) +} diff --git a/alfred/cmd/ingester/app/factory/default_factory.go b/alfred/cmd/ingester/app/factory/default_factory.go new file mode 100644 index 0000000..5910823 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/default_factory.go @@ -0,0 +1,30 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/config" + kafka "alfred/pkg/kafka/produce" + "alfred/utils" + "errors" +) + +type DefaultWebFactory struct { + Producer kafka.KProducer +} + +func (f *DefaultWebFactory) CreateWebClient(client string) (interfaces.WebClient, error) { + var defaultWebClientList = config.GetIngesterConfig().DefaultFactoryWebClientList + if utils.Contains(defaultWebClientList, client) { + return f.CreateDefaultWebClient(), nil + } + return nil, errors.New("invalid client name for DefaultWebClient") +} + +func (f *DefaultWebFactory) Initialize(producer kafka.KProducer) { + f.Producer = producer +} + +func (f *DefaultWebFactory) CreateDefaultWebClient() *service.DefaultWebClient { + return service.NewDefaultWebClientHandler(f.Producer) +} diff --git a/alfred/cmd/ingester/app/factory/default_factory_test.go b/alfred/cmd/ingester/app/factory/default_factory_test.go new file mode 100644 index 0000000..bc521c7 --- /dev/null +++ 
b/alfred/cmd/ingester/app/factory/default_factory_test.go @@ -0,0 +1,40 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestLonghornFactory_Initialize(t *testing.T) { + log.InitLogger() + longhorn_factory := DefaultWebFactory{} + longhorn_factory.Initialize(nil) + assert.Equal(t, longhorn_factory.Producer, nil) +} + +func TestLongornFactory_CreateWebClient(t *testing.T) { + log.InitLogger() + longhorn_factory := DefaultWebFactory{Producer: nil} + + t.Run("Invalid client", func(t *testing.T) { + invalidClient := "test" + expectedError := errors.New("invalid client name for DefaultWebClient") + webClient, err := longhorn_factory.CreateWebClient(invalidClient) + assert.Nil(t, webClient) + assert.NotNil(t, err) + assert.Equal(t, expectedError, err) + }) + + t.Run("Valid client", func(t *testing.T) { + validClient := utils.LONGHORN + expectedWebClient := service.NewDefaultWebClientHandler(nil) + webClient, err := longhorn_factory.CreateWebClient(validClient) + assert.NotNil(t, webClient) + assert.Nil(t, err) + assert.Equal(t, webClient, expectedWebClient) + }) +} diff --git a/alfred/cmd/ingester/app/factory/default_web_client_with_cookie_data_factory.go b/alfred/cmd/ingester/app/factory/default_web_client_with_cookie_data_factory.go new file mode 100644 index 0000000..de9f6f6 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/default_web_client_with_cookie_data_factory.go @@ -0,0 +1,30 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/config" + kafka "alfred/pkg/kafka/produce" + "alfred/utils" + "errors" +) + +type DefaultWebClientWithCookieDataFactory struct { + Producer kafka.KProducer +} + +func (f *DefaultWebClientWithCookieDataFactory) CreateWebClient(client string) (interfaces.WebClient, error) { + var defaultWebClientWithCookieList = 
config.GetIngesterConfig().DefaultFactoryWebClientListWithCookieData + if utils.Contains(defaultWebClientWithCookieList, client) { + return f.CreateWebClientWithCookieData(), nil + } + return nil, errors.New("invalid web client name with cookie data") +} + +func (f *DefaultWebClientWithCookieDataFactory) Initialize(producer kafka.KProducer) { + f.Producer = producer +} + +func (f *DefaultWebClientWithCookieDataFactory) CreateWebClientWithCookieData() *service.DefaultWebClientWithCookieData { + return service.NewDefaultWebClientWithCookieDataHandler(f.Producer) +} diff --git a/alfred/cmd/ingester/app/factory/factory.go b/alfred/cmd/ingester/app/factory/factory.go new file mode 100644 index 0000000..ed57672 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/factory.go @@ -0,0 +1,14 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service/interfaces" + "alfred/internal/clients" + "alfred/pkg/cache" + kafka "alfred/pkg/kafka/produce" + "alfred/repositoryAccessLayer" +) + +type ClientFactory interface { + CreateClient(client string) (interfaces.Client, error) + Initialize(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient, cacheClient cache.ConfigClientInterface) +} diff --git a/alfred/cmd/ingester/app/factory/navi_user_app_factory.go b/alfred/cmd/ingester/app/factory/navi_user_app_factory.go new file mode 100644 index 0000000..9ef6dec --- /dev/null +++ b/alfred/cmd/ingester/app/factory/navi_user_app_factory.go @@ -0,0 +1,38 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/internal/clients" + "alfred/pkg/cache" + kafka "alfred/pkg/kafka/produce" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type NaviUserAppFactory struct { + Producer kafka.KProducer + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient + cacheClient cache.ConfigClientInterface +} + +func (f 
*NaviUserAppFactory) CreateClient(client string) (interfaces.Client, error) { + + if client == utils.NAVI_USER_APP { + return f.CreateNaviUserApp(), nil + } + return nil, errors.New("invalid API key for NaviUserApp") +} + +func (f *NaviUserAppFactory) Initialize(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient, cacheClient cache.ConfigClientInterface) { + f.Producer = producer + f.Repositories = repositories + f.HttpClient = httpClient + f.cacheClient = cacheClient +} + +func (f *NaviUserAppFactory) CreateNaviUserApp() *service.NaviUserApp { + return service.NewNaviUserAppHandler(f.Producer, f.Repositories, f.HttpClient, f.cacheClient) +} diff --git a/alfred/cmd/ingester/app/factory/navi_user_app_factory_test.go b/alfred/cmd/ingester/app/factory/navi_user_app_factory_test.go new file mode 100644 index 0000000..7ae02c8 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/navi_user_app_factory_test.go @@ -0,0 +1,40 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestNaviAppFactory_Initialize(t *testing.T) { + log.InitLogger() + navi_app_factory := NaviUserAppFactory{} + navi_app_factory.Initialize(nil) + assert.Equal(t, navi_app_factory.Producer, nil) +} + +func TestNaviAppFactory_CreateWebClient(t *testing.T) { + log.InitLogger() + navi_app_factory := NaviUserAppFactory{Producer: nil} + + t.Run("Invalid client", func(t *testing.T) { + invalidClient := "test" + expectedError := errors.New("invalid API key for NaviUserApp") + appClient, err := navi_app_factory.CreateClient(invalidClient) + assert.Nil(t, appClient) + assert.NotNil(t, err) + assert.Equal(t, expectedError, err) + }) + + t.Run("Valid client", func(t *testing.T) { + validClient := utils.NAVI_USER_APP + expectedAppClient := service.NewNaviUserAppHandler(nil) + appClient, err := 
navi_app_factory.CreateClient(validClient) + assert.NotNil(t, appClient) + assert.Nil(t, err) + assert.Equal(t, appClient, expectedAppClient) + }) +} diff --git a/alfred/cmd/ingester/app/factory/web_factory.go b/alfred/cmd/ingester/app/factory/web_factory.go new file mode 100644 index 0000000..1873e16 --- /dev/null +++ b/alfred/cmd/ingester/app/factory/web_factory.go @@ -0,0 +1,11 @@ +package factory + +import ( + "alfred/cmd/ingester/app/service/interfaces" + kafka "alfred/pkg/kafka/produce" +) + +type WebClientFactory interface { + Initialize(producer kafka.KProducer) + CreateWebClient(client string) (interfaces.WebClient, error) +} diff --git a/alfred/cmd/ingester/app/handler/client_manager.go b/alfred/cmd/ingester/app/handler/client_manager.go new file mode 100644 index 0000000..a359466 --- /dev/null +++ b/alfred/cmd/ingester/app/handler/client_manager.go @@ -0,0 +1,39 @@ +package handler + +import ( + "alfred/cmd/ingester/app/factory" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/internal/clients" + "alfred/pkg/cache" + kafka "alfred/pkg/kafka/produce" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" +) + +type ClientManager interface { + GetClientByType(clientName string) (interfaces.Client, error) +} + +type ClientManagerImpl struct { + Producer kafka.KProducer + Repositories *repositoryAccessLayer.RepositoryAccessLayer + HttpClient *clients.HttpClient + cacheClientv2 cache.ConfigClientInterface +} + +var clientFactories = map[string]factory.ClientFactory{ + utils.COSMOS: &factory.CosmosAppFactory{}, + utils.NAVI_USER_APP: &factory.NaviUserAppFactory{}, +} + +func (cm *ClientManagerImpl) GetClientByType(clientName string) (interfaces.Client, error) { + var clientFactory factory.ClientFactory + + clientFactory, found := clientFactories[clientName] + if !found { + return nil, errors.New("invalid client name: " + clientName) + } + clientFactory.Initialize(cm.Producer, cm.Repositories, cm.HttpClient, cm.cacheClientv2) + return 
clientFactory.CreateClient(clientName) +} diff --git a/alfred/cmd/ingester/app/handler/cruise_control_handler.go b/alfred/cmd/ingester/app/handler/cruise_control_handler.go new file mode 100644 index 0000000..1d21421 --- /dev/null +++ b/alfred/cmd/ingester/app/handler/cruise_control_handler.go @@ -0,0 +1,205 @@ +package handler + +import ( + "alfred/cmd/ingester/app/helper" + validator "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/internal/clients" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/cache" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "encoding/json" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "net/http" + "strings" + "sync" +) + +type CruiseControlHandler struct { + cacheClient cache.ConfigClientInterface + requestHeadersValidator validator.HeadersValidator + clientManager ClientManager + litmusProxyClient clients.LitmusProxyClient + mu sync.Mutex +} + +func NewCruiseControlHandler(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) *CruiseControlHandler { + headersValidator := &validator.HeadersValidatorImpl{} + clientManager := &ClientManagerImpl{Repositories: repositories, HttpClient: httpClient, cacheClientv2: cache.NewCacheConfig()} + return &CruiseControlHandler{ + cacheClient: cache.NewCacheConfig(), + requestHeadersValidator: headersValidator, + clientManager: clientManager, + litmusProxyClient: &clients.LitmusProxyClientImpl{HttpClient: httpClient.HttpClient}, + mu: sync.Mutex{}, + } +} + +func (cc *CruiseControlHandler) FetchCruiseControlConfig(c *gin.Context) { + appVersionName := c.GetHeader("appVersionName") + osVersion := c.GetHeader("osVersion") + deviceId := c.GetHeader("deviceId") + appVersionCode := c.GetHeader("appVersionCode") + appOs := strings.ToLower(c.GetHeader(utils.X_PLATFORM)) + if appOs == utils.EMPTY { + appOs = utils.ANDROID_OS + } + + client, _ := cc.clientManager.GetClientByType(utils.NAVI_USER_APP) + + response, err := 
client.FetchCruise(appVersionName, osVersion, appVersionCode, config.GetIngesterConfig().ElasticSearchConfig.CruiseControlIndex, deviceId, appOs) + + if err != nil { + log.Error("Error while fetching cruise control config", + zap.String("app_version_name", appVersionName), zap.String("os_version", osVersion), + zap.String("device_id", deviceId), zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + c.JSON(http.StatusOK, utils.SuccessResponse(response.Hits.Hits, http.StatusOK)) +} + +func (cc *CruiseControlHandler) FetchCruiseControlConfigV2(c *gin.Context) { + + appVersionName := c.GetHeader("appVersionName") + osVersion := c.GetHeader("osVersion") + deviceId := c.GetHeader("deviceId") + appVersionCode := c.GetHeader("appVersionCode") + networkType := c.GetHeader("network_type") + networkStrengthString := c.GetHeader("network_strength") + batteryString := c.GetHeader("battery") + memoryString := c.GetHeader("memory") + appOs := strings.ToLower(c.GetHeader(utils.X_PLATFORM)) + if appOs == utils.IOS_OS { + c.JSON(http.StatusOK, utils.SuccessResponse(nil, http.StatusOK)) + return + } + if appOs == utils.EMPTY { + appOs = utils.ANDROID_OS + } + + headers := c.Request.Header + + val, ok := config.GetIngesterConfig().CruiseLogsDeviceIds[deviceId] + + if ok && val.(bool) == true { + for key, values := range headers { + for _, value := range values { + log.Info("Header values sent in the request", + zap.String(key, value)) + } + } + } + + var networkStrength, battery, memory float64 + + networkStrength = utils.GetFloat64FromString(networkStrengthString) + battery = utils.GetFloat64FromString(batteryString) + memory = utils.GetFloat64FromString(memoryString) + + // Todo add authentication and validation strategies in this api + clientName, err := cc.requestHeadersValidator.ValidateHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) 
+ return + } + + client, err := cc.clientManager.GetClientByType(clientName) + + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + } + + cruiseControlIndexMap := config.GetIngesterConfig().ElasticSearchConfig.CruiseControlIndexClientMap + index := cruiseControlIndexMap[clientName] + + deviceAttributes := ingester.DeviceAndNetworkAttributes{ + DeviceId: deviceId, + NetworkType: networkType, + NetworkStrength: networkStrength, + DeviceAttributes: ingester.DeviceAttributes{ + Battery: battery, + Memory: memory, + }, + } + + var response *es.ESResponse + + response, err = client.FetchCruise(appVersionName, osVersion, appVersionCode, index, deviceId, appOs) + if err != nil { + log.Error("Error while fetching cruise control config v2", + zap.String("app_version_name", appVersionName), zap.String("os_version", osVersion), + zap.String("device_id", deviceId), zap.String("app_version_code", appVersionCode), zap.String("appOs", appOs), zap.Error(err)) + return + } + + if response == nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + response, err = DeepCopyESResponse(response) + if err != nil { + log.Error("Error while making deep copy of cruise control config v2", + zap.String("app_version_name", appVersionName), zap.String("os_version", osVersion), + zap.String("device_id", deviceId), zap.String("app_version_code", appVersionCode), zap.String("appOs", appOs), zap.Error(err)) + return + } + var fps int + _, respType := response.Hits.Hits[0].Source["recordings_config"].(map[string]interface{})["snapshot_per_second"].(float64) + if respType { + fps = int(response.Hits.Hits[0].Source["recordings_config"].(map[string]interface{})["snapshot_per_second"].(float64)) + } else { + fps = response.Hits.Hits[0].Source["recordings_config"].(map[string]interface{})["snapshot_per_second"].(int) + } + + if appOs == utils.ANDROID_OS && 
!config.GetIngesterConfig().FpsDisableFeatureMap[clientName].(bool) { + fps = client.GetFpsForDevice(deviceAttributes) + } + + response.Hits.Hits[0].Source[utils.IMAGE_TYPE] = utils.ImageTypeJpeg + if cc.litmusProxyClient.GetLitmusExperimentOutput(deviceId, config.GetIngesterConfig().OutboundServiceConfig.LitmusAlfredExperimentMap[utils.IMAGE_TYPE_EXPERIMENT]) { + response.Hits.Hits[0].Source[utils.IMAGE_TYPE] = utils.ImageTypeWebp + } + response.Hits.Hits[0].Source[utils.FILE_TYPE_EXTENSION] = utils.ZipExtension + if cc.litmusProxyClient.GetLitmusExperimentOutput(deviceId, config.GetIngesterConfig().OutboundServiceConfig.LitmusAlfredExperimentMap[utils.FILE_TYPE_EXTENSION_EXPERIMENT]) { + response.Hits.Hits[0].Source[utils.FILE_TYPE_EXTENSION] = utils.ZipXzExtension + } + response.Hits.Hits[0].Source["current_time"] = utils.GetCurrentTimeInMillis() + response.Hits.Hits[0].Source["recordings_config"].(map[string]interface{})["snapshot_per_second"] = fps + c.JSON(http.StatusOK, utils.SuccessResponse(response.Hits.Hits, http.StatusOK)) + response.Hits.Hits[0].Source["recordings_config"].(map[string]interface{})["snapshot_per_second"] = 1 +} + +func (cc *CruiseControlHandler) InvalidateCruiseCache(c *gin.Context) { + clientName, err := cc.requestHeadersValidator.ValidateHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + appVersionName := c.Query("appVersionName") + appOs := strings.ToLower(c.Query("appOs")) + cacheKey := helper.GenerateCacheKey(appVersionName, appOs, clientName) + cc.cacheClient.Delete(cacheKey) + c.JSON(http.StatusOK, utils.SuccessResponse("Success", http.StatusOK)) +} + +func DeepCopyESResponse(src *es.ESResponse) (*es.ESResponse, error) { + // Using JSON as an intermediary for deep copying + previousResponseBytes, err := json.Marshal(src) + if err != nil { + return nil, err + } + + var newResponse es.ESResponse + err = json.Unmarshal(previousResponseBytes, &newResponse) 
+ if err != nil { + return nil, err + } + + return &newResponse, nil +} diff --git a/alfred/cmd/ingester/app/handler/cruise_control_handler_test.go b/alfred/cmd/ingester/app/handler/cruise_control_handler_test.go new file mode 100644 index 0000000..b9b37ab --- /dev/null +++ b/alfred/cmd/ingester/app/handler/cruise_control_handler_test.go @@ -0,0 +1,252 @@ +package handler + +import ( + "alfred/config" + "alfred/mocks" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.uber.org/goleak" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestFetchCruiseControlConfig_Fail(t *testing.T) { + + log.InitLogger() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/cruise", nil) + req.Header.Set("appVersionName", "4.0.0_1") + req.Header.Set("osVersion", "31") + req.Header.Set("deviceId", "d") + c, _ := gin.CreateTestContext(w) + c.Request = req + + cruiseService.On("FetchCruise", mock.Anything, mock.Anything, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(nil, errors.New("error")) + + cc.FetchCruiseControlConfig(c) + + cruiseService.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "error") + +} + +func TestFetchCruiseControlConfig_Pass(t *testing.T) { + + log.InitLogger() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/cruise", nil) + req.Header.Set("appVersionName", "4.0.0_1") + 
req.Header.Set("osVersion", "31") + req.Header.Set("deviceId", "d") + c, _ := gin.CreateTestContext(w) + c.Request = req + + cruiseService.On("FetchCruise", mock.Anything, mock.Anything, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(mock.Anything, nil) + + cc.FetchCruiseControlConfig(c) + + cruiseService.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "31") + +} + +func TestFetchCruiseControlConfigV2_Pass(t *testing.T) { + log.InitLogger() + + cruiseClientMap := "{\"CosmosApp\": \"sa-alfred-cosmos-cruise-control-index\", \"NaviUserApp\": \"sa-alfred-cruise-control-index\"}" + t.Setenv("ELASTICSEARCH_CRUISE_CONTROL_INDEX_CLIENT_MAP", cruiseClientMap) + t.Setenv("FRAMES_PER_SECOND_DISABLE_FEATURE", "false") + logsDevice := "{\"d\":true}" + t.Setenv("LOGS_DEVICE_IDS", logsDevice) + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v2/cruise", nil) + req.Header.Set("appVersionName", "4.0.0_1") + req.Header.Set("osVersion", "31") + req.Header.Set("deviceId", "d") + req.Header.Set("appVersionCode", "366") + req.Header.Set("network_type", "5G") + req.Header.Set("network_strength", "6000") + req.Header.Set("battery", "70") + req.Header.Set("memory", "40") + req.Header.Set(utils.X_API_KEY, "apiKey2") + c, _ := gin.CreateTestContext(w) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return(utils.NAVI_USER_APP, nil) + cruiseService.On("FetchCruise", mock.Anything, mock.Anything, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(mock.Anything, nil) + cruiseService.On("GetFpsForDevice", mock.Anything).Return(2) + + cc.FetchCruiseControlConfigV2(c) 
+ + validateHeaders.AssertExpectations(t) + cruiseService.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "2") +} + +func TestFetchCruiseControlConfigV2_Headers_Fail(t *testing.T) { + log.InitLogger() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v2/cruise", nil) + req.Header.Set("appVersionName", "4.0.0_1") + req.Header.Set("osVersion", "31") + req.Header.Set("deviceId", "d") + req.Header.Set("appVersionCode", "366") + req.Header.Set("network_type", "5G") + req.Header.Set("network_strength", "6000") + req.Header.Set("battery", "70") + req.Header.Set("memory", "40") + req.Header.Set(utils.X_API_KEY, "apiKey2") + c, _ := gin.CreateTestContext(w) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return(utils.EMPTY, errors.New("header error")) + + cc.FetchCruiseControlConfigV2(c) + + validateHeaders.AssertExpectations(t) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "header error") +} + +func TestFetchCruiseControlConfigV2_Response_Fail(t *testing.T) { + defer goleak.VerifyNone(t) + + log.InitLogger() + + cruiseClientMap := "{\"CosmosApp\": \"sa-alfred-cosmos-cruise-control-index\", \"NaviUserApp\": \"sa-alfred-cruise-control-index\"}" + t.Setenv("ELASTICSEARCH_CRUISE_CONTROL_INDEX_CLIENT_MAP", cruiseClientMap) + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v2/cruise", nil) + 
req.Header.Set("appVersionName", "4.0.0_1") + req.Header.Set("osVersion", "31") + req.Header.Set("deviceId", "d") + req.Header.Set("appVersionCode", "366") + req.Header.Set("network_type", "5G") + req.Header.Set("network_strength", "6000") + req.Header.Set("battery", "70") + req.Header.Set("memory", "40") + req.Header.Set(utils.X_API_KEY, "apiKey2") + c, _ := gin.CreateTestContext(w) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return(utils.NAVI_USER_APP, nil) + cruiseService.On("FetchCruise", mock.Anything, mock.Anything, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(nil, errors.New("response error")) + cruiseService.On("GetFpsForDevice", mock.Anything).Return(2) + + cc.FetchCruiseControlConfigV2(c) + + validateHeaders.AssertExpectations(t) + cruiseService.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "response error") +} + +func TestInvalidateCruiseCache_Pass(t *testing.T) { + log.InitLogger() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/invalidate/cache", nil) + c, _ := gin.CreateTestContext(w) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return(utils.NAVI_USER_APP, nil) + cacheClient.On("Delete", mock.Anything).Return() + + cc.InvalidateCruiseCache(c) + + validateHeaders.AssertExpectations(t) + cacheClient.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "Success") +} + +func TestInvalidateCruiseCache_Fail(t *testing.T) { + log.InitLogger() + + cruiseService := &mocks.MockCruiseService{} + cacheClient := &mocks.MockCacheClient{} + validateHeaders := &mocks.MockHeadersValidator{} + + cc := &CruiseControlHandler{cruiseService, cacheClient, 
validateHeaders} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/invalidate/cache", nil) + c, _ := gin.CreateTestContext(w) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return(utils.EMPTY, errors.New("error")) + + cc.InvalidateCruiseCache(c) + + validateHeaders.AssertExpectations(t) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "error") +} diff --git a/alfred/cmd/ingester/app/handler/ingester_handler.go b/alfred/cmd/ingester/app/handler/ingester_handler.go new file mode 100644 index 0000000..120dc1d --- /dev/null +++ b/alfred/cmd/ingester/app/handler/ingester_handler.go @@ -0,0 +1,167 @@ +package handler + +import ( + "alfred/cmd/ingester/app/service/interfaces" + validator "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/internal/clients" + "alfred/model/ingester" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + "go.uber.org/zap" +) + +type IngesterHandler struct { + producer kafka.KProducer + clientManager ClientManager + webClientManager WebClientManager + headersValidator validator.HeadersValidator + webClientValidator validator.WebClientValidator +} + +func NewIngesterHandler(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) *IngesterHandler { + clientManager := &ClientManagerImpl{Producer: producer, Repositories: repositories, HttpClient: httpClient} + webClientManager := NewWebClientManagerImpl(producer) + headersValidator := &validator.HeadersValidatorImpl{} + webClientValidator := &validator.WebClientValidatorImpl{CommonValidator: validator.CommonValidationStrategyImpl{}} + return &IngesterHandler{ + producer: producer, + clientManager: clientManager, + webClientManager: webClientManager, + headersValidator: headersValidator, + webClientValidator: webClientValidator, + } +} + 
+func (ih *IngesterHandler) IngestEvent(c *gin.Context) { + var alfredEventsAttributes ingester.AppEvent + if err := c.ShouldBindJSON(&alfredEventsAttributes); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err := ih.producer.SendMessage(alfredEventsAttributes, config.GetIngesterConfig().KafkaConfig.EventIngestionTopic, alfredEventsAttributes.BaseAttributes.DeviceId, utils.NAVI_USER_APP) + if err != nil { + log.Error("Error while publishing to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func (ih *IngesterHandler) IngestSessionRecordingEvent(c *gin.Context) { + var alfredSessionRecordingEvent ingester.SessionUploadRequest + if err := c.ShouldBindJSON(&alfredSessionRecordingEvent); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + isValid := validateSessionRecordingEvent(alfredSessionRecordingEvent) + if !isValid { + log.Error("[INGEST_SESSION] bad request exception", zap.String("body", fmt.Sprintf("%v", alfredSessionRecordingEvent))) + c.JSON(http.StatusBadRequest, utils.ErrorResponse(errors.New("bad request"), http.StatusBadRequest, nil)) + return + } + + err := ih.producer.SendMessage(alfredSessionRecordingEvent, config.GetIngesterConfig().KafkaConfig.SessionUploadTopic, alfredSessionRecordingEvent.BaseAttributes.DeviceId, utils.NAVI_USER_APP) + if err != nil { + log.Error("Error while publishing to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func (ih *IngesterHandler) IngestMetrics(c *gin.Context) { + var appMetrics ingester.AppMetrics + if err := c.ShouldBindJSON(&appMetrics); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err := 
ih.producer.SendMessage(appMetrics, config.GetIngesterConfig().KafkaConfig.MetricIngestionTopic, appMetrics.BaseAttributes.DeviceId, "") + if err != nil { + log.Error("Error while publishing to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func validateSessionRecordingEvent(event ingester.SessionUploadRequest) bool { + return utils.ValidateId(event.BaseAttributes.SessionId, utils.SESSION_SUFFIX) && + utils.ValidateZipName(event.SessionUploadEventAttributes.EventId, utils.EVENT_SUFFIX) +} + +func (ih *IngesterHandler) IngestSessionRecordingEventV2(c *gin.Context) { + client, err := ih.validateAndGetClient(c) + if err != nil { + log.Error("sessions could not be ingested as client was not mapped") + return + } + client.IngestSession(c) +} + +func (ih *IngesterHandler) IngestEventV2(c *gin.Context) { + + client, err := ih.validateAndGetClient(c) + if err != nil { + log.Error("events could not be ingested as client was not mapped") + return + } + client.IngestEvent(c) +} + +func (ih *IngesterHandler) IngestMetricsV2(c *gin.Context) { + + client, err := ih.validateAndGetClient(c) + if err != nil { + log.Error("events could not be ingested as client was not mapped") + return + } + client.IngestAppMetrics(c) +} + +func (ih *IngesterHandler) validateAndGetClient(c *gin.Context) (interfaces.Client, error) { + clientName, err := ih.headersValidator.ValidateHeaders(c) + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return nil, errors.New("api key invalid for client") + } + + client, err := ih.clientManager.GetClientByType(clientName) + + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return nil, errors.New("client not configured in factory") + } + return client, nil +} + +func (ih *IngesterHandler) IngestWebSessionRecordingEventV2(c *gin.Context) { + webClient, webClientName, err := 
ih.validateAndGetWebClient(c) + if err != nil { + log.Error("sessions could not be ingested as client was not mapped", zap.String("webClientName", webClientName)) + return + } + webClient.IngestWebSession(c, webClientName) +} + +func (ih *IngesterHandler) validateAndGetWebClient(c *gin.Context) (interfaces.WebClient, string, error) { + webClientName, err := ih.headersValidator.ValidateHeaders(c) + if err != nil { + webClientName, err = ih.webClientValidator.ValidateWebClientUsingProjectName(c) + } + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return nil, utils.EMPTY, errors.New("invalid client") + } + + webClient, err := ih.webClientManager.GetWebClientByType(webClientName) + + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return nil, utils.EMPTY, errors.New("client not configured in factory") + } + return webClient, webClientName, nil +} diff --git a/alfred/cmd/ingester/app/handler/ingester_handler_test.go b/alfred/cmd/ingester/app/handler/ingester_handler_test.go new file mode 100644 index 0000000..f7ddfbf --- /dev/null +++ b/alfred/cmd/ingester/app/handler/ingester_handler_test.go @@ -0,0 +1,290 @@ +package handler + +import ( + validator "alfred/cmd/ingester/app/validation" + "alfred/mocks" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/utils" + "bytes" + "encoding/json" + "errors" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +func TestIngestEvent_Pass(t *testing.T) { + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := ingester.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: 
io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", validData, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(nil) + + ih.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestIngestEvent_ReqBody_Fail(t *testing.T) { + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + inValidData := "invalid" + invalidJsonData, _ := json.Marshal(inValidData) + invalidRequestBody := bytes.NewBuffer(invalidJsonData) + c.Request = &http.Request{Body: io.NopCloser(invalidRequestBody)} + + ih.IngestEvent(c) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestIngestEvent_Producer_Fail(t *testing.T) { + log.InitLogger() + + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := ingester.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", validData, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(errors.New("error")) + + ih.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "error") +} + +func TestIngestSessionRecordingEvent_Pass(t *testing.T) { + log.InitLogger() + + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: "6a323eb3-58a5-48a6-a36a-25d85a05ec32" + utils.SESSION_SUFFIX, + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + 
EventId: "6a323eb3-58a5-48a6-a36a-25d85a05ec32" + utils.EVENT_SUFFIX, + }, + } + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", validData, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(nil) + + ih.IngestSessionRecordingEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + +} + +func TestIngestSessionRecordingEvent_ReqBodyJson_Fail(t *testing.T) { + log.InitLogger() + + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := "invalid" + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + ih.IngestSessionRecordingEvent(c) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestIngestSessionRecordingEvent_ReqBody_Fail(t *testing.T) { + log.InitLogger() + + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: "6a323eb3-58a5-48a6-a36a-", + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: "6a323eb3-58a5-48a6-a36a-25d85a05ec32", + }, + } + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + ih.IngestSessionRecordingEvent(c) + + assert.Equal(t, http.StatusBadRequest, w.Code) +} + +func TestIngestSessionRecordingEvent_Kafka_Fail(t *testing.T) { + log.InitLogger() + + mockKProducer := &mocks.MockKProducer{} + + ih := &IngesterHandler{ + producer: 
mockKProducer, + } + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + validData := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: "6a323eb3-58a5-48a6-a36a-25d85a05ec32" + utils.SESSION_SUFFIX, + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: "6a323eb3-58a5-48a6-a36a-25d85a05ec32" + utils.EVENT_SUFFIX, + }, + } + + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", validData, mock.Anything, mock.Anything, utils.NAVI_USER_APP).Return(errors.New("error")) + + ih.IngestSessionRecordingEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "error") +} + +func TestIngesterHandler_IngestWebSessionRecordingEventV2(t *testing.T) { + log.InitLogger() + mockClientManager := new(mocks.MockClientManager) + mockWebClientManager := new(mocks.MockWebClientManager) + mockWebClientValidator := new(mocks.MockWebClientValidator) + + ih := IngesterHandler{ + producer: nil, + clientManager: mockClientManager, + webClientManager: mockWebClientManager, + headersValidator: &validator.HeadersValidatorImpl{}, + webClientValidator: mockWebClientValidator, + } + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + t.Run("Invalid client", func(t *testing.T) { + expectedError := errors.New(utils.INVALID_CLIENT) + mockWebClientValidator.On("ValidateWebClientUsingProjectName", c).Return("", expectedError).Once() + ih.IngestWebSessionRecordingEventV2(c) + mockWebClientValidator.AssertExpectations(t) + mockWebClientManager.AssertExpectations(t) + mockClientManager.AssertExpectations(t) + }) + + t.Run("client not configured in factory ", func(t *testing.T) { + expectedClient := "RAVEN" + mockWebClient := new(mocks.MockWebClient) + expectedError := 
errors.New("") + mockWebClientValidator.On("ValidateWebClientUsingProjectName", c).Return(expectedClient, nil).Once() + mockWebClientManager.On("GetWebClientByType", expectedClient).Return(mockWebClient, expectedError).Once() + ih.IngestWebSessionRecordingEventV2(c) + mockWebClientValidator.AssertExpectations(t) + mockWebClientManager.AssertExpectations(t) + mockClientManager.AssertExpectations(t) + }) + + t.Run("Valid client", func(t *testing.T) { + expectedClient := "RAVEN" + mockWebClient := new(mocks.MockWebClient) + mockWebClientValidator.On("ValidateWebClientUsingProjectName", c).Return(expectedClient, nil).Once() + mockWebClientManager.On("GetWebClientByType", expectedClient).Return(mockWebClient, nil).Once() + mockWebClient.On("IngestWebSession", c).Once() + ih.IngestWebSessionRecordingEventV2(c) + mockWebClientValidator.AssertExpectations(t) + mockWebClientManager.AssertExpectations(t) + mockClientManager.AssertExpectations(t) + }) + +} + +func TestSessionRecordingEventV2_Pass(t *testing.T) { + log.InitLogger() + + mockClient := &mocks.MockIngesterClient{} + mockClientManager := &mocks.MockClientManager{} + mockHeadersValidator := &mocks.MockHeadersValidator{} + + ih := IngesterHandler{clientManager: mockClientManager, headersValidator: mockHeadersValidator} + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v2/ingest/session", nil) + req.Header.Set(utils.X_API_KEY, "apiKey2") + c, _ := gin.CreateTestContext(w) + c.Request = req + + mockHeadersValidator.On("ValidateHeaders", c).Return(utils.NAVI_USER_APP, nil) + mockClientManager.On("GetClientByType", utils.NAVI_USER_APP).Return(mockClient, nil) + mockClient.On("IngestSession", c).Return() + + ih.IngestSessionRecordingEventV2(c) + + mockClient.AssertExpectations(t) + mockClientManager.AssertExpectations(t) + mockHeadersValidator.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + +} diff --git a/alfred/cmd/ingester/app/handler/s3_handler.go 
b/alfred/cmd/ingester/app/handler/s3_handler.go new file mode 100644 index 0000000..169c9b1 --- /dev/null +++ b/alfred/cmd/ingester/app/handler/s3_handler.go @@ -0,0 +1,88 @@ +package handler + +import ( + validator "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/utils" + "net/http" + + "github.com/gin-gonic/gin" + "go.uber.org/zap" +) + +type S3Handler struct { + s3Client s3.S3Client + headersValidator validator.HeadersValidator +} + +func NewS3Handler(s3Client s3.S3Client) *S3Handler { + headersValidatorImpl := &validator.HeadersValidatorImpl{} + return &S3Handler{ + s3Client: s3Client, + headersValidator: headersValidatorImpl, + } +} + +func (sh *S3Handler) FetchPresignedUploadURL(c *gin.Context) { + sessionId := c.Param("sessionId") + preSignedURL, err := sh.s3Client.PreSignedUploadUrl( + config.GetIngesterConfig().S3Config.SessionUploadBucket, sessionId, utils.ZipExtension.String(), utils.ZipContentType, + ) + if err != nil { + log.Error("fetch presigned url failed", zap.String("session_id", sessionId), zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + c.JSON(http.StatusOK, utils.SuccessResponse(preSignedURL, http.StatusOK)) +} + +func (sh *S3Handler) FetchPreSignedUploadUrlV2(c *gin.Context) { + + // Todo add authentication and validation strategies in this api + clientName, err := sh.headersValidator.ValidateHeaders(c) + + if err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + clientToBucketMap := config.GetIngesterConfig().S3Config.SessionUploadBucketClientMap + bucketId := clientToBucketMap[clientName] + + fileTypeExtension := c.Query("fileTypeExtension") + if fileTypeExtension == "" { + fileTypeExtension = utils.ZipExtension.String() + } + contentType := getContentType(fileTypeExtension) + + fileName := c.Param("fileName") + dirName := 
c.Query("directoryName") + isValidDirName := utils.ValidateDirectoryName(dirName) + if isValidDirName { + fileName = dirName + utils.FORWARD_SLASH + fileName + } + preSignedURL, err := sh.s3Client.PreSignedUploadUrl( + bucketId, fileName, fileTypeExtension, contentType, + ) + if err != nil { + log.Error("fetch presigned url v2 failed", zap.String("fileName", fileName), zap.String("client", clientName), zap.Error(err)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + c.JSON(http.StatusOK, utils.SuccessResponse(preSignedURL, http.StatusOK)) +} + +func getContentType(fileTypeExtension string) string { + switch fileTypeExtension { + case utils.ZipExtension.String(): + return utils.ZipContentType + case utils.ZipXzExtension.String(): + return "application/x-xz" + case utils.JsonExtension.String(): + return utils.ApplicationJsonContentType + default: + return "application/octet-stream" + } +} diff --git a/alfred/cmd/ingester/app/handler/s3_handler_test.go b/alfred/cmd/ingester/app/handler/s3_handler_test.go new file mode 100644 index 0000000..2bd6f49 --- /dev/null +++ b/alfred/cmd/ingester/app/handler/s3_handler_test.go @@ -0,0 +1,149 @@ +package handler + +import ( + "alfred/mocks" + "alfred/pkg/log" + "alfred/utils" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" +) + +func TestFetchPresignedUploadURL(t *testing.T) { + + s3Client := &mocks.MockS3Client{} + + sh := NewS3Handler(s3Client) + + t.Run("Success", func(t *testing.T) { + + w := httptest.NewRecorder() + http.NewRequest("GET", "/sessions/123", nil) + c, _ := gin.CreateTestContext(w) + c.Params = append(c.Params, gin.Param{Key: "sessionId", Value: "123"}) + + s3Client.On("PreSignedUploadUrl", "", "123", ".zip", "application/zip").Return("mockedURL", nil) + + sh.FetchPresignedUploadURL(c) + + s3Client.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + 
assert.Contains(t, w.Body.String(), "mockedURL") + }) +} + +func TestFetchPresignedUploadURL_Fail(t *testing.T) { + + s3Client := &mocks.MockS3Client{} + + sh := NewS3Handler(s3Client) + + log.InitLogger() + + t.Run("Error", func(t *testing.T) { + + w := httptest.NewRecorder() + http.NewRequest("GET", "/sessions/123", nil) + c, _ := gin.CreateTestContext(w) + c.Params = append(c.Params, gin.Param{Key: "sessionId", Value: "123"}) + + s3Client.On("PreSignedUploadUrl", "", "123", ".zip", "application/zip").Return("", errors.New("mocked error")) + + sh.FetchPresignedUploadURL(c) + + //headersValidator.AssertExpectations(t) + s3Client.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "mocked error") + }) +} + +func TestFetchPreSignedUploadUrlV2(t *testing.T) { + + s3Client := &mocks.MockS3Client{} + + validateHeaders := &mocks.MockHeadersValidator{} + + sh := &s3Handler{s3Client, validateHeaders} + + log.InitLogger() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/sessions/123", nil) + req.Header.Set("X-Api-Key", "c") + c, _ := gin.CreateTestContext(w) + c.Params = append(c.Params, gin.Param{Key: "sessionId", Value: "123"}) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return("cosmos", nil) + + s3Client.On("PreSignedUploadUrl", "", "123", ".zip", "application/zip").Return("mockedURL", nil) + + sh.FetchPreSignedUploadUrlV2(c) + + validateHeaders.AssertExpectations(t) + s3Client.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "mockedURL") +} + +func TestFetchPreSignedUploadUrlV2_Fail1(t *testing.T) { + s3Client := &mocks.MockS3Client{} + + validateHeaders := &mocks.MockHeadersValidator{} + + sh := &s3Handler{s3Client, validateHeaders} + + log.InitLogger() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/sessions/123", nil) + req.Header.Set("X-Api-Key", "c") + c, _ := gin.CreateTestContext(w) 
+ c.Params = append(c.Params, gin.Param{Key: "sessionId", Value: "123"}) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return("", errors.New(utils.INVALID_CLIENT)) + + sh.FetchPreSignedUploadUrlV2(c) + + validateHeaders.AssertExpectations(t) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), utils.INVALID_CLIENT) +} + +func TestFetchPreSignedUploadUrlV2_Fail2(t *testing.T) { + s3Client := &mocks.MockS3Client{} + + validateHeaders := &mocks.MockHeadersValidator{} + + sh := &s3Handler{s3Client, validateHeaders} + + log.InitLogger() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/sessions/123", nil) + req.Header.Set("X-Api-Key", "c") + c, _ := gin.CreateTestContext(w) + c.Params = append(c.Params, gin.Param{Key: "sessionId", Value: "123"}) + c.Request = req + + validateHeaders.On("ValidateHeaders", c).Return("cosmos", nil) + s3Client.On("PreSignedUploadUrl", "", "123", ".zip", "application/zip").Return("", errors.New("mocked error")) + + sh.FetchPreSignedUploadUrlV2(c) + + validateHeaders.AssertExpectations(t) + s3Client.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "mocked error") +} diff --git a/alfred/cmd/ingester/app/handler/web_client_manager.go b/alfred/cmd/ingester/app/handler/web_client_manager.go new file mode 100644 index 0000000..f139be5 --- /dev/null +++ b/alfred/cmd/ingester/app/handler/web_client_manager.go @@ -0,0 +1,43 @@ +package handler + +import ( + "alfred/cmd/ingester/app/factory" + "alfred/cmd/ingester/app/service/interfaces" + "alfred/config" + kafka "alfred/pkg/kafka/produce" + "errors" +) + +type WebClientManager interface { + GetWebClientByType(clientName string) (interfaces.WebClient, error) +} + +type WebClientManagerImpl struct { + Producer kafka.KProducer + WebClientFactories map[string]factory.WebClientFactory +} + +func NewWebClientManagerImpl(producer kafka.KProducer) 
*WebClientManagerImpl { + WebClientFactories := map[string]factory.WebClientFactory{} + var defaultFactoryWebClientWithCookieList = config.GetIngesterConfig().DefaultFactoryWebClientListWithCookieData + for _, client := range defaultFactoryWebClientWithCookieList { + WebClientFactories[client] = &factory.DefaultWebClientWithCookieDataFactory{} + } + var defaultFactoryWebClientList = config.GetIngesterConfig().DefaultFactoryWebClientList + for _, client := range defaultFactoryWebClientList { + WebClientFactories[client] = &factory.DefaultWebFactory{} + } + return &WebClientManagerImpl{ + Producer: producer, + WebClientFactories: WebClientFactories, + } +} + +func (wcm *WebClientManagerImpl) GetWebClientByType(clientName string) (interfaces.WebClient, error) { + webClientFactory, found := wcm.WebClientFactories[clientName] + if !found { + return nil, errors.New("invalid client name: " + clientName) + } + webClientFactory.Initialize(wcm.Producer) + return webClientFactory.CreateWebClient(clientName) +} diff --git a/alfred/cmd/ingester/app/helper/cache_helper.go b/alfred/cmd/ingester/app/helper/cache_helper.go new file mode 100644 index 0000000..c1069ae --- /dev/null +++ b/alfred/cmd/ingester/app/helper/cache_helper.go @@ -0,0 +1,8 @@ +package helper + +import "alfred/utils" + +func GenerateCacheKey(appVersionName, appOs string, clientName string) string { + cacheKey := clientName + utils.UNDERSCORE + appVersionName + utils.UNDERSCORE + appOs + return cacheKey +} diff --git a/alfred/cmd/ingester/app/helper/fps_algorithms.go b/alfred/cmd/ingester/app/helper/fps_algorithms.go new file mode 100644 index 0000000..25e35c7 --- /dev/null +++ b/alfred/cmd/ingester/app/helper/fps_algorithms.go @@ -0,0 +1,24 @@ +package helper + +import ( + "alfred/model/ingester" + "alfred/utils" +) + +func WeightedAlgorithm(deviceInfo ingester.DeviceAndNetworkAttributes, weightMap, batteryMap, memoryMap map[string]interface{}) (float64, float64) { + var wBatteryScore, wMemoryScore float64 + if 
deviceInfo.DeviceAttributes.Battery >= batteryMap[utils.HIGH].(float64) { + wBatteryScore = weightMap[utils.BATTERY_HIGH].(float64) * deviceInfo.DeviceAttributes.Battery + } else if deviceInfo.DeviceAttributes.Battery >= batteryMap[utils.MEDIUM].(float64) { + wBatteryScore = weightMap[utils.BATTERY_MEDIUM].(float64) * deviceInfo.DeviceAttributes.Battery + } else { + wBatteryScore = weightMap[utils.BATTERY_LOW].(float64) * deviceInfo.DeviceAttributes.Battery + } + + if deviceInfo.DeviceAttributes.Memory >= memoryMap[utils.HIGH].(float64) { + wMemoryScore = weightMap[utils.MEMORY_HIGH].(float64) * (100 - deviceInfo.DeviceAttributes.Memory) + } else { + wMemoryScore = weightMap[utils.MEMORY_LOW].(float64) * (100 - deviceInfo.DeviceAttributes.Memory) + } + return wBatteryScore, wMemoryScore +} diff --git a/alfred/cmd/ingester/app/model/cosmos/CosmosAppEvent.go b/alfred/cmd/ingester/app/model/cosmos/CosmosAppEvent.go new file mode 100644 index 0000000..a623571 --- /dev/null +++ b/alfred/cmd/ingester/app/model/cosmos/CosmosAppEvent.go @@ -0,0 +1,22 @@ +package cosmos + +import "alfred/model/ingester" + +type AppEvent struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + Events []EventAttributes `json:"events,omitempty"` +} + +type EventAttributes struct { + EventId string `json:"event_id,omitempty"` + ParentSessionId string `json:"parent_session_id,omitempty"` + SessionId string `json:"session_id,omitempty"` + ScreenName string `json:"screen_name,omitempty"` + ScreenshotTime int64 `json:"screenshot_timestamp,omitempty"` + ModuleName string `json:"module_name,omitempty"` + EventName string `json:"event_name,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + EventType ingester.EventType `json:"event_type,omitempty"` + ZipName string `json:"zip_name,omitempty"` +} diff --git a/alfred/cmd/ingester/app/model/cosmos/SessionUploadRequest.go 
b/alfred/cmd/ingester/app/model/cosmos/SessionUploadRequest.go new file mode 100644 index 0000000..16abe1f --- /dev/null +++ b/alfred/cmd/ingester/app/model/cosmos/SessionUploadRequest.go @@ -0,0 +1,49 @@ +package cosmos + +type SessionUploadRequest struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + SessionUploadEventAttributes SessionUploadEventAttributes `json:"session_upload_event_attributes,omitempty"` +} + +type BaseAttributes struct { + AppVersionCode string `json:"app_version_code,omitempty"` + AppVersionName string `json:"app_version_name,omitempty"` + ClientTs int64 `json:"client_ts,omitempty"` + DeviceId string `json:"device_id,omitempty"` + DeviceModel string `json:"device_model,omitempty"` + DeviceManufacturer string `json:"device_manufacturer,omitempty"` + ScreenResolution string `json:"screen_resolution,omitempty"` + AppOS string `json:"app_os,omitempty"` + OsVersion string `json:"os_version,omitempty"` + Latitude float32 `json:"latitude,omitempty"` + Longitude float32 `json:"longitude,omitempty"` + NetworkType string `json:"network_type,omitempty"` + AgentId string `json:"customer_id,omitempty"` + UpTime int64 `json:"up_time,omitempty"` + CarrierName string `json:"carrier_name,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + SessionTimeStamp int64 `json:"session_time_stamp,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + SessionId string `json:"session_id,omitempty"` + ParentSessionId string `json:"parent_session_id,omitempty"` + TraceId string `json:"trace_id,omitempty"` + EventEndTimeStamp int64 `json:"event_end_time_stamp,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + HasErrors bool `json:"has_errors,omitempty"` + SnapshotPerSecond int64 `json:"snapshot_per_second,omitempty"` + ImageType string `json:"image_type,omitempty"` + FileTypeExtension string `json:"file_type_extension,omitempty"` +} + +type SessionUploadEventAttributes struct { + 
BeginningDeviceAttributes DeviceAttributes `json:"beginning_device_attributes,omitempty"` + EndDeviceAttributes DeviceAttributes `json:"end_device_attributes,omitempty"` + EventId string `json:"event_id,omitempty"` +} + +type DeviceAttributes struct { + Battery float64 `json:"battery"` + Cpu float64 `json:"cpu,omitempty"` + Storage float64 `json:"storage,omitempty"` + Memory float64 `json:"memory,omitempty"` +} diff --git a/alfred/cmd/ingester/app/model/navi/NaviUserAppEvent.go b/alfred/cmd/ingester/app/model/navi/NaviUserAppEvent.go new file mode 100644 index 0000000..38ed595 --- /dev/null +++ b/alfred/cmd/ingester/app/model/navi/NaviUserAppEvent.go @@ -0,0 +1,23 @@ +package navi + +import "alfred/model/ingester" + +type AppEvent struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + Events []EventAttributes `json:"events,omitempty"` +} + +type EventAttributes struct { + EventId string `json:"event_id,omitempty"` + ParentSessionId string `json:"parent_session_id,omitempty"` + SessionId string `json:"session_id,omitempty"` + ScreenName string `json:"screen_name,omitempty"` + ScreenshotTime int64 `json:"screenshot_timestamp,omitempty"` + FragmentList []string `json:"fragment_list,omitempty"` + ModuleName string `json:"module_name,omitempty"` + EventName string `json:"event_name,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + EventType ingester.EventType `json:"event_type,omitempty"` + ZipName string `json:"zip_name,omitempty"` +} diff --git a/alfred/cmd/ingester/app/model/navi/SessionUploadRequest.go b/alfred/cmd/ingester/app/model/navi/SessionUploadRequest.go new file mode 100644 index 0000000..c27b05d --- /dev/null +++ b/alfred/cmd/ingester/app/model/navi/SessionUploadRequest.go @@ -0,0 +1,49 @@ +package navi + +type SessionUploadRequest struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + SessionUploadEventAttributes 
SessionUploadEventAttributes `json:"session_upload_event_attributes,omitempty"` +} + +type BaseAttributes struct { + AppVersionCode string `json:"app_version_code,omitempty"` + AppVersionName string `json:"app_version_name,omitempty"` + ClientTs int64 `json:"client_ts,omitempty"` + DeviceId string `json:"device_id,omitempty"` + DeviceModel string `json:"device_model,omitempty"` + DeviceManufacturer string `json:"device_manufacturer,omitempty"` + ScreenResolution string `json:"screen_resolution,omitempty"` + AppOS string `json:"app_os,omitempty"` + OsVersion string `json:"os_version,omitempty"` + Latitude float32 `json:"latitude,omitempty"` + Longitude float32 `json:"longitude,omitempty"` + NetworkType string `json:"network_type,omitempty"` + CustomerId string `json:"customer_id,omitempty"` + UpTime int64 `json:"up_time,omitempty"` + CarrierName string `json:"carrier_name,omitempty"` + SessionTimeStamp int64 `json:"session_time_stamp,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + SessionId string `json:"session_id,omitempty"` + ParentSessionId string `json:"parent_session_id,omitempty"` + TraceId string `json:"trace_id,omitempty"` + EventEndTimeStamp int64 `json:"event_end_time_stamp,omitempty"` + SnapshotPerSecond int64 `json:"snapshot_per_second,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + HasErrors bool `json:"has_errors,omitempty"` + ImageType string `json:"image_type,omitempty"` + FileTypeExtension string `json:"file_type_extension,omitempty"` +} + +type SessionUploadEventAttributes struct { + BeginningDeviceAttributes DeviceAttributes `json:"beginning_device_attributes,omitempty"` + EndDeviceAttributes DeviceAttributes `json:"end_device_attributes,omitempty"` + EventId string `json:"event_id,omitempty"` +} + +type DeviceAttributes struct { + Battery float64 `json:"battery"` + Cpu float64 `json:"cpu,omitempty"` + Storage float64 `json:"storage,omitempty"` + 
Memory float64 `json:"memory,omitempty"` +} diff --git a/alfred/cmd/ingester/app/server.go b/alfred/cmd/ingester/app/server.go new file mode 100644 index 0000000..6f6a6b5 --- /dev/null +++ b/alfred/cmd/ingester/app/server.go @@ -0,0 +1,172 @@ +package app + +import ( + "alfred/cmd/ingester/app/handler" + "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/internal/clients" + "alfred/internal/metrics" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/pkg/s3" + "alfred/repository" + "alfred/repositoryAccessLayer" + "alfred/utils" + "fmt" + "github.com/gin-gonic/gin" + httpTrace "github.com/navi-commons/go-tracer/http" + "go.elastic.co/apm/module/apmgin/v2" + "go.uber.org/zap" + "net/http" + "strconv" + "time" +) + +type Server struct { + gin *gin.Engine + producer kafka.KProducer + repositories *repositoryAccessLayer.RepositoryAccessLayer + s3Handler *handler.S3Handler + httpClient *clients.HttpClient + metricsMiddleware *gin.RouterGroup +} + +func NewServer(gin *gin.Engine) *Server { + httpClient := clients.NewHttpClient(config.GetIngesterConfig().HttpConfig) + kP := kafka.NewKProducer(config.GetIngesterConfig().BaseConfig.Env, config.GetIngesterConfig().KafkaConfig.BaseConfig) + s3Client := s3.NewS3Client() + esConfig := config.GetIngesterConfig().ElasticSearchConfig.BaseConfig + repositories := repository.InitRepositories(esConfig) + repositoryAccessLayer := repositoryAccessLayer.InitRepositoryAccessLayer(repositories) + + return &Server{ + gin: gin, + producer: kP, + repositories: repositoryAccessLayer, + s3Handler: handler.NewS3Handler(s3Client), + httpClient: httpClient, + } +} + +func (s *Server) Handler() { + if config.GetIngesterConfig().TracerConfig.APMEnabled { + s.gin.Use(httpTrace.TracerGinMiddleware()) + } + + s.metricsMiddleware = s.createMetricMiddleware() + s.healthCheckHandler() + s.gin.Use(apmgin.Middleware(s.gin)) + s.s3AppPresignHandler() + s.gin.Use(s.corsHandler()) + s.ingesterHandler() + 
metrics.AdminHandler(config.GetIngesterConfig().BaseConfig.MetricPort) + s.cruiseControlHandler() +} + +func (s *Server) createMetricMiddleware() *gin.RouterGroup { + return s.gin.Group("", func(c *gin.Context) { + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(c.FullPath(), c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(c.FullPath(), c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(c.FullPath(), c.Request.Method, status).Observe(endTime) + metrics.AlfredApiRequestLatencySummary.WithLabelValues(c.FullPath(), c.Request.Method, status).Observe(endTime) + }) + +} + +func (s *Server) s3HandlerMetricMiddleware(requestUrl string) *gin.RouterGroup { + return s.gin.Group(utils.EMPTY, func(c *gin.Context) { + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(requestUrl, c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(requestUrl, c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(requestUrl, c.Request.Method, status).Observe(endTime) + metrics.AlfredApiRequestLatencySummary.WithLabelValues(requestUrl, c.Request.Method, status).Observe(endTime) + }) + +} + +func (s *Server) cruiseControlHandler() { + cruiseMiddleware := validation.RateControl(utils.CRUISE_CONTROL) + cruiseV2Middleware := validation.RateControl(utils.CRUISE_CONTROL_V2) + ccc := handler.NewCruiseControlHandler(s.repositories, s.httpClient) + s.metricsMiddleware.GET(utils.CRUISE_CONTROL, cruiseMiddleware, ccc.FetchCruiseControlConfig) + s.metricsMiddleware.GET(utils.CRUISE_CONTROL_V2, cruiseV2Middleware, ccc.FetchCruiseControlConfigV2) + s.metricsMiddleware.POST("/invalidate/cache", 
ccc.InvalidateCruiseCache) +} + +func (s *Server) ingesterHandler() { + ingestWebSessionV2Middleware := validation.RateControl(utils.WEB_SESSIONS_V2) + ih := handler.NewIngesterHandler(s.producer, s.repositories, s.httpClient) + s.metricsMiddleware.POST("/ingest/event", ih.IngestEvent) + s.metricsMiddleware.POST("/ingest/session", ih.IngestSessionRecordingEvent) + s.metricsMiddleware.POST("/ingest/metrics", ih.IngestMetrics) + s.metricsMiddleware.POST("/v2/ingest/metrics", ih.IngestMetricsV2) + s.metricsMiddleware.POST("/v2/ingest/session", ih.IngestSessionRecordingEventV2) + s.metricsMiddleware.POST("/v2/ingest/event", ih.IngestEventV2) + s.gin.POST(utils.WEB_SESSIONS_V2, ingestWebSessionV2Middleware, ih.IngestWebSessionRecordingEventV2) + s.metricsMiddleware.POST("/ingest/app/crashes", ih.IngestEventV2) + s.gin.GET("/ingest/web/session/pre-sign/:fileName", s.s3Handler.FetchPreSignedUploadUrlV2) +} + +func (s *Server) corsHandler() gin.HandlerFunc { + return func(c *gin.Context) { + whitelistedDomains := getWhitelistedDomains() + + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT") + c.Writer.Header().Set("Access-Control-Allow-Headers", config.NewCorsConfig().AllowedCustomHeaders) + origin := c.Request.Header.Get("Origin") + if !whitelistedDomains[origin] { + c.AbortWithStatus(http.StatusUnauthorized) + return + } + c.Writer.Header().Set("Access-Control-Allow-Origin", origin) + + if c.Request.Method == "OPTIONS" { + c.AbortWithStatus(http.StatusOK) + return + } + + startTime := time.Now() + c.Next() + endTime := float64(time.Since(startTime)) + status := strconv.Itoa(c.Writer.Status()) + metrics.AlfredApiRequestCounter.WithLabelValues(c.FullPath(), c.Request.Method, status).Inc() + metrics.AlfredApiRequestLatencySum.WithLabelValues(c.FullPath(), c.Request.Method, status).Add(endTime) + metrics.AlfredApiRequestLatencyHistogram.WithLabelValues(c.FullPath(), c.Request.Method, status).Observe(endTime) + 
metrics.AlfredApiRequestLatencySummary.WithLabelValues(c.FullPath(), c.Request.Method, status).Observe(endTime) + } +} + +func getWhitelistedDomains() map[string]bool { + allowedList := make(map[string]bool) + domains := config.NewCorsConfig().WhitelistedDomains + for _, domain := range domains { + domainLocal := domain + allowedList[domainLocal] = true + } + return allowedList +} + +func (s *Server) s3AppPresignHandler() { + s.s3HandlerMetricMiddleware("/ingest/session/pre-sign").GET("/ingest/session/pre-sign/:sessionId", s.s3Handler.FetchPresignedUploadURL) + s.s3HandlerMetricMiddleware("/v2/ingest/session/pre-sign").GET("/v2/ingest/session/pre-sign/:fileName", s.s3Handler.FetchPreSignedUploadUrlV2) +} + +func (s *Server) Start() { + log.Info("starting alfred ingester server", + zap.String("port", strconv.Itoa(config.GetIngesterConfig().BaseConfig.Port))) + s.gin.Run(fmt.Sprintf(":%v", config.GetIngesterConfig().BaseConfig.Port)) + +} + +func (s *Server) healthCheckHandler() { + s.metricsMiddleware.GET("/ping", func(c *gin.Context) { + c.String(http.StatusOK, "pong") + }) +} diff --git a/alfred/cmd/ingester/app/service/cosmos_app.go b/alfred/cmd/ingester/app/service/cosmos_app.go new file mode 100644 index 0000000..697f8af --- /dev/null +++ b/alfred/cmd/ingester/app/service/cosmos_app.go @@ -0,0 +1,174 @@ +package service + +import ( + "alfred/cmd/ingester/app/adapter/cosmos_adapter" + "alfred/cmd/ingester/app/helper" + "alfred/cmd/ingester/app/model/cosmos" + validator "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/internal/clients" + "alfred/model/es" + "alfred/model/ingester" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" + "net/http" + "strings" +) + +type CosmosApp struct { + producer kafka.KProducer + CommonValidator validator.CommonValidationStrategy + cosmosValidator validator.CosmosValidationStrategy + cosmosAppEventAdapter 
cosmos_adapter.CosmosAppEventAdapter + cosmosSessionUploadAdapter cosmos_adapter.CosmosSessionUploadRequestAdapter + cruiseControlAccessLayer repositoryAccessLayer.CruiseControlAccessLayer + litmusProxyClient clients.LitmusProxyClient + cruiseControlService CruiseControlService +} + +func NewCosmosAppHandler(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) *CosmosApp { + return &CosmosApp{ + producer: producer, + CommonValidator: validator.CommonValidationStrategyImpl{}, + cosmosValidator: &validator.CosmosValidationStrategyImpl{}, + cruiseControlAccessLayer: repositories.CruiseControlAccessLayer, + litmusProxyClient: &clients.LitmusProxyClientImpl{HttpClient: httpClient.HttpClient}, + cruiseControlService: NewCruiseControlService(repositories), + } +} + +func (ca *CosmosApp) FetchCruise(appVersionName string, osVersion string, appVersionCode string, index string, deviceId, appOs string) (*es.ESResponse, error) { + var values *es.ESResponse + + isExperimentEnabled := ca.litmusProxyClient.GetLitmusExperimentOutput(deviceId, config.GetIngesterConfig().OutboundServiceConfig.LitmusAlfredExperimentMap[utils.COSMOS_ENABLE_EXPERIMENT]) + if !isExperimentEnabled { + return nil, errors.New("DeviceId not in the Experiment") + } + valuesFromDb, err := ca.cruiseControlAccessLayer.FetchCruiseControlConfig(appVersionName, appOs, index) + if err != nil { + return nil, err + } + if valuesFromDb.Hits.Total.Value == 0 { + valuesFromDb, err = ca.cruiseControlService.FetchPreviousCruise(appVersionName, osVersion, appVersionCode, appOs, index) + if err != nil { + return nil, err + } + } + + values = valuesFromDb + + return values, nil +} + +func (ca *CosmosApp) GetFpsForDevice(deviceInfo ingester.DeviceAndNetworkAttributes) int { + thresholdScore := config.GetIngesterConfig().FpsThresholdScoreMap + weightMap := config.GetIngesterConfig().FpsWeightMapCosmos + batteryMap := config.GetIngesterConfig().FpsBatteryMapCosmos 
+ memoryMap := config.GetIngesterConfig().FpsMemoryMapCosmos + + wBatteryScore, wMemoryScore := helper.WeightedAlgorithm(deviceInfo, weightMap, batteryMap, memoryMap) + + score := wBatteryScore + wMemoryScore + + if score > thresholdScore[utils.COSMOS].(float64) && (strings.ToLower(deviceInfo.NetworkType) == utils.FIVE_G || strings.ToLower(deviceInfo.NetworkType) == utils.FOUR_G || strings.ToLower(deviceInfo.NetworkType) == utils.WIFI) && + wBatteryScore > 0 && wMemoryScore > 0 && + deviceInfo.NetworkStrength/1024 > weightMap[utils.NETWORK_STRENGTH].(float64) { + return utils.RECORDING_2_FPS + } + return utils.DEFAULT_RECORDING_FPS +} + +func (ca *CosmosApp) IngestSession(c *gin.Context) { + + var requestBody cosmos.SessionUploadRequest + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + mappedRequest, err := ca.adaptSessionUploadRequest(requestBody) + if err != nil { + log.Error("Error while adapting the request Body of cosmos session upload") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + if err := ca.validateRequest(c, requestBody, mappedRequest); err != nil { + return + } + + err = ca.producer.SendMessage(mappedRequest, config.GetIngesterConfig().KafkaConfig.SessionUploadTopicClientMap[utils.COSMOS], mappedRequest.BaseAttributes.DeviceId, utils.COSMOS) + if err != nil { + log.Error("Error while publishing cosmos app session to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func (ca *CosmosApp) IngestEvent(c *gin.Context) { + + var requestBody cosmos.AppEvent + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + //Todo add validator for ingesting events + + mappedRequest, err := 
ca.adaptAppEventRequest(requestBody) + if err != nil { + log.Error("Error while adapting the request body of app events ingestion") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err = ca.producer.SendMessage(mappedRequest, config.GetIngesterConfig().KafkaConfig.EventIngestionTopicClientMap[utils.COSMOS], mappedRequest.BaseAttributes.DeviceId, utils.COSMOS) + + if err != nil { + log.Error("Error while publishing cosmos app event to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + +} + +func (ca *CosmosApp) IngestAppMetrics(c *gin.Context) { + var appMetrics ingester.AppMetrics + if err := c.ShouldBindJSON(&appMetrics); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err := ca.producer.SendMessage(appMetrics, config.GetIngesterConfig().KafkaConfig.MetricsIngestionTopicClientMap[utils.COSMOS], appMetrics.BaseAttributes.DeviceId, utils.COSMOS) + if err != nil { + log.Error("Error while publishing cosmos app metrics to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func (ca *CosmosApp) validateRequest(c *gin.Context, cosmosRequest cosmos.SessionUploadRequest, request ingester.SessionUploadRequest) error { + if err := ca.CommonValidator.Validate(c, request); err != nil { + return err + } + if err := ca.cosmosValidator.Validate(c, cosmosRequest); err != nil { + return err + } + + return nil +} + +func (ca *CosmosApp) adaptSessionUploadRequest(requestBody cosmos.SessionUploadRequest) (ingester.SessionUploadRequest, error) { + ca.cosmosSessionUploadAdapter = cosmos_adapter.CosmosSessionUploadRequestAdapterImpl{ + CosmosSessionUploadRequest: requestBody, + } + return ca.cosmosSessionUploadAdapter.AdaptSessionUploadRequestCosmos() +} + +func (ca *CosmosApp) 
adaptAppEventRequest(requestBody cosmos.AppEvent) (ingester.AppEvent, error) { + ca.cosmosAppEventAdapter = cosmos_adapter.CosmosAppEventAdapterImpl{ + CosmosAppEvent: requestBody, + } + return ca.cosmosAppEventAdapter.AdaptAppEventRequestCosmos() +} diff --git a/alfred/cmd/ingester/app/service/cosmos_app_test.go b/alfred/cmd/ingester/app/service/cosmos_app_test.go new file mode 100644 index 0000000..3224b5a --- /dev/null +++ b/alfred/cmd/ingester/app/service/cosmos_app_test.go @@ -0,0 +1,252 @@ +package service + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "alfred/cmd/ingester/app/model/navi" + "alfred/mocks" + "alfred/model/ingester" + "alfred/pkg/log" + "bytes" + "encoding/json" + "errors" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +func TestCosmosApp_IngestSession(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCosmosValidationStrategy := new(mocks.MockCosmosValidationStrategy) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy, cosmosValidator: mockCosmosValidationStrategy} + + t.Run("Invalid Request Body", func(t *testing.T) { + cosmosApp.IngestSession(c) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + t.Run("Common Validation Failed", func(t *testing.T) { + + validData := cosmos.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + cosmosApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + + assert.Equal(t, http.StatusBadRequest, 
w.Code) + + }) + +} + +func TestCosmosApp_ingestSession_FailCosmosValidation(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCosmosValidationStrategy := new(mocks.MockCosmosValidationStrategy) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy, cosmosValidator: mockCosmosValidationStrategy} + validData := cosmos.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockCosmosValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + cosmosApp.IngestSession(c) + + mockCosmosValidationStrategy.AssertExpectations(t) + mockCommonValidationStrategy.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestCosmosApp_ingestSession_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCosmosValidationStrategy := new(mocks.MockCosmosValidationStrategy) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy, cosmosValidator: mockCosmosValidationStrategy} + + validData := cosmos.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockCosmosValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + 
mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + cosmosApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + mockCosmosValidationStrategy.AssertExpectations(t) + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestCosmosApp_ingestSession_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCosmosValidationStrategy := new(mocks.MockCosmosValidationStrategy) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy, cosmosValidator: mockCosmosValidationStrategy} + + validData := cosmos.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockCosmosValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + cosmosApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + mockCosmosValidationStrategy.AssertExpectations(t) + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestCosmosApp_IngestEvent(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy} + + t.Run("Invalid Request Body", func(t *testing.T) { + cosmosApp.IngestEvent(c) + 
assert.Equal(t, http.StatusBadRequest, w.Code) + }) + +} + +func TestCosmosApp_ingestEvent_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + cosmosApp := CosmosApp{producer: mockKProducer} + + validData := cosmos.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + cosmosApp.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestCosmosApp_ingestEvent_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + cosmosApp := CosmosApp{producer: mockKProducer} + + validData := cosmos.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + cosmosApp.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestCosmosApp_ingestAppMetrics_FailJson(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy} + + cosmosApp.IngestAppMetrics(c) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestCosmosApp_ingestAppMetrics_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + 
c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + cosmosApp := CosmosApp{producer: mockKProducer, CommonValidator: mockCommonValidationStrategy} + + validData := ingester.AppMetrics{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + cosmosApp.IngestAppMetrics(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestCosmosApp_ingestAppMetrics_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + cosmosApp := CosmosApp{producer: mockKProducer} + + validData := navi.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + cosmosApp.IngestAppMetrics(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} diff --git a/alfred/cmd/ingester/app/service/cruise_control_service.go b/alfred/cmd/ingester/app/service/cruise_control_service.go new file mode 100644 index 0000000..b6bcbf8 --- /dev/null +++ b/alfred/cmd/ingester/app/service/cruise_control_service.go @@ -0,0 +1,65 @@ +package service + +import ( + "alfred/mapper" + "alfred/model/core/cruise" + "alfred/model/es" + "alfred/repositoryAccessLayer" +) + +type CruiseControlService interface { + FetchPreviousCruise(appVersionName, osVersion, appVersionCode, appOs, index string) (*es.ESResponse, error) +} + +type CruiseControlServiceImpl struct { + 
cruiseControlAccessLayer repositoryAccessLayer.CruiseControlAccessLayer +} + +func NewCruiseControlService(repositories *repositoryAccessLayer.RepositoryAccessLayer) *CruiseControlServiceImpl { + return &CruiseControlServiceImpl{ + cruiseControlAccessLayer: repositories.CruiseControlAccessLayer, + } +} + +func (cc *CruiseControlServiceImpl) FetchPreviousCruise(appVersionName, osVersion, appVersionCode, appOs, index string) (*es.ESResponse, error) { + previousAppVersion, err := cc.cruiseControlAccessLayer.FetchPreviousAppVersion(index, appOs) + if err != nil { + return nil, err + } + var valuesFromDb *es.ESResponse + valuesFromDb, err = cc.cruiseControlAccessLayer.FetchCruiseControlConfig(previousAppVersion, appOs, index) + if err != nil { + return nil, err + } + if len(valuesFromDb.Hits.Hits) > 0 { + valuesFromDb.Hits.Hits[0].ID = appVersionName + valuesFromDb.Hits.Hits[0].Source["os_config"].(map[string]interface{})["app_version"] = appVersionName + valuesFromDb.Hits.Hits[0].Source["os_config"].(map[string]interface{})["os_version"] = osVersion + valuesFromDb.Hits.Hits[0].Source["os_config"].(map[string]interface{})["app_version_code"] = appVersionCode + } + _ = cc.createCruise(valuesFromDb, index) + valuesFromDb, err = cc.cruiseControlAccessLayer.FetchCruiseControlConfig(appVersionName, appOs, index) + if err != nil { + return nil, err + } + + return valuesFromDb, nil +} + +func (cc *CruiseControlServiceImpl) createCruise(resp *es.ESResponse, index string) error { + var cruiseControlRequest cruise.ControlConfig + + value := resp.Hits.Hits[0].Source + + cruiseControlRequest, err := mapper.MapEsResponseToCruiseControlConfig(value) + if err != nil { + return err + } + + err = cc.cruiseControlAccessLayer.CreateCruiseControlConfig(&cruiseControlRequest, index) + if err != nil { + return err + } + + return nil +} diff --git a/alfred/cmd/ingester/app/service/defaultWebClient.go b/alfred/cmd/ingester/app/service/defaultWebClient.go new file mode 100644 index 
0000000..a3e74a4 --- /dev/null +++ b/alfred/cmd/ingester/app/service/defaultWebClient.go @@ -0,0 +1,54 @@ +package service + +import ( + "alfred/config" + "alfred/model/ingester" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/utils" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "net/http" + "strconv" +) + +type DefaultWebClient struct { + producer kafka.KProducer +} + +func NewDefaultWebClientHandler(producer kafka.KProducer) *DefaultWebClient { + return &DefaultWebClient{ + producer: producer, + } +} + +func (lh *DefaultWebClient) IngestWebSession(c *gin.Context, webClientName string) { + var requestBody ingester.WebSessionUploadRequest + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + if config.GetIngesterConfig().IsWebSessionDataSliceEnabledClientMap[webClientName] == "true" && len(requestBody.SessionAttributes.Data) > 0 && len(requestBody.SessionAttributes.Data) > config.GetIngesterConfig().WebSessionDataSliceSize { + c.JSON(http.StatusOK, utils.SuccessResponse("Web session data ingested successfully", http.StatusOK)) + go func() { + webData := requestBody.SessionAttributes.Data + webDataSlices := utils.CreateSlicesFromStringList(webData, config.GetIngesterConfig().WebSessionDataSliceSize) + zip_name := requestBody.SessionAttributes.EventId + for count, webDataSlice := range webDataSlices { + requestBody.SessionAttributes.Data = webDataSlice + requestBody.SessionAttributes.DataCounter = int64(count + 1) + requestBody.SessionAttributes.EventId = zip_name + "_" + strconv.FormatInt(requestBody.SessionAttributes.DataCounter, 10) + err := lh.producer.SendMessage(requestBody, config.GetIngesterConfig().KafkaConfig.WebSessionUploadTopicClientMap[webClientName], requestBody.BaseAttributes.SessionId, webClientName) + if err != nil { + log.Error("Error while publishing default web client session to kafka", 
zap.String("webClientName", webClientName), zap.String("eventId", requestBody.SessionAttributes.EventId), zap.String("sessionId", requestBody.BaseAttributes.SessionId)) + } + } + }() + } else { + err := lh.producer.SendMessage(requestBody, config.GetIngesterConfig().KafkaConfig.WebSessionUploadTopicClientMap[webClientName], requestBody.BaseAttributes.SessionId, webClientName) + if err != nil { + log.Error("Error while publishing default session to kafka", zap.String("webClientName", webClientName)) + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + } +} diff --git a/alfred/cmd/ingester/app/service/defaultWebClientWithCookieData.go b/alfred/cmd/ingester/app/service/defaultWebClientWithCookieData.go new file mode 100644 index 0000000..10a2d66 --- /dev/null +++ b/alfred/cmd/ingester/app/service/defaultWebClientWithCookieData.go @@ -0,0 +1,72 @@ +package service + +import ( + "alfred/config" + "alfred/model/ingester" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/utils" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" +) + +type DefaultWebClientWithCookieData struct { + producer kafka.KProducer +} + +func NewDefaultWebClientWithCookieDataHandler(producer kafka.KProducer) *DefaultWebClientWithCookieData { + return &DefaultWebClientWithCookieData{ + producer: producer, + } +} + +func (tw *DefaultWebClientWithCookieData) IngestWebSession(c *gin.Context, webClientName string) { + var requestBody ingester.WebSessionUploadRequest + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + cookie := c.GetHeader("Cookie") + deviceId := fetchDeviceId(cookie) + requestBody.BaseAttributes.DeviceId = deviceId + if config.GetIngesterConfig().IsWebSessionDataSliceEnabledClientMap[webClientName] == "true" && len(requestBody.SessionAttributes.Data) > 0 && 
len(requestBody.SessionAttributes.Data) > config.GetIngesterConfig().WebSessionDataSliceSize { + c.JSON(http.StatusOK, utils.SuccessResponse("Web session data ingested successfully", http.StatusOK)) + go func() { + webData := requestBody.SessionAttributes.Data + webDataSlices := utils.CreateSlicesFromStringList(webData, config.GetIngesterConfig().WebSessionDataSliceSize) + zip_name := requestBody.SessionAttributes.EventId + for count, webDataSlice := range webDataSlices { + requestBody.SessionAttributes.Data = webDataSlice + requestBody.SessionAttributes.DataCounter = int64(count + 1) + requestBody.SessionAttributes.EventId = zip_name + utils.UNDERSCORE + strconv.FormatInt(requestBody.SessionAttributes.DataCounter, 10) + err := tw.producer.SendMessage(requestBody, config.GetIngesterConfig().KafkaConfig.WebSessionUploadTopicClientMap[webClientName], requestBody.BaseAttributes.SessionId, webClientName) + if err != nil { + log.Error("Error while publishing web session with cookie data to kafka", zap.String("eventId", requestBody.SessionAttributes.EventId), zap.String("sessionId", requestBody.BaseAttributes.SessionId)) + } + } + }() + } else { + err := tw.producer.SendMessage(requestBody, config.GetIngesterConfig().KafkaConfig.WebSessionUploadTopicClientMap[webClientName], requestBody.BaseAttributes.SessionId, webClientName) + if err != nil { + log.Error("Error while publishing web session with cookie data to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + } +} + +func fetchDeviceId(cookie string) string { + deviceId := "" + if cookie != "" { + cookieList := strings.Split(cookie, ";") + for _, cookie := range cookieList { + if strings.Contains(cookie, utils.DEVICE_ID) { + deviceId = strings.Split(cookie, "=")[1] + break + } + } + } + return strings.TrimSpace(deviceId) +} diff --git a/alfred/cmd/ingester/app/service/defaultWebClient_test.go b/alfred/cmd/ingester/app/service/defaultWebClient_test.go 
new file mode 100644 index 0000000..3b9aec2 --- /dev/null +++ b/alfred/cmd/ingester/app/service/defaultWebClient_test.go @@ -0,0 +1,55 @@ +package service + +import ( + "alfred/mocks" + "alfred/model/ingester" + "alfred/pkg/log" + "bytes" + "encoding/json" + "errors" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +func TestLongHorn_IngestWebSession(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKproducer := new(mocks.MockKProducer) + longhorn := DefaultWebClient{producer: mockKproducer} + + _ = NewDefaultWebClientHandler(mockKproducer) + + t.Run("invalid request body", func(t *testing.T) { + longhorn.IngestWebSession(c) + }) + + t.Run("valid request Body and unsuccessful publishing to kafka", func(t *testing.T) { + validData := ingester.WebSessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + expectedError := errors.New("Kafka publish error") + mockKproducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(expectedError) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + c.JSON(http.StatusInternalServerError, nil) + longhorn.IngestWebSession(c) + assert.Equal(t, http.StatusInternalServerError, w.Code) + mockKproducer.AssertExpectations(t) + }) + + t.Run("valid request Body and successful publishing to kafka", func(t *testing.T) { + validData := ingester.WebSessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + mockKproducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + longhorn.IngestWebSession(c) + mockKproducer.AssertExpectations(t) + }) +} diff --git 
a/alfred/cmd/ingester/app/service/interfaces/client.go b/alfred/cmd/ingester/app/service/interfaces/client.go new file mode 100644 index 0000000..1180087 --- /dev/null +++ b/alfred/cmd/ingester/app/service/interfaces/client.go @@ -0,0 +1,15 @@ +package interfaces + +import ( + "alfred/model/es" + "alfred/model/ingester" + "github.com/gin-gonic/gin" +) + +type Client interface { + IngestSession(c *gin.Context) + IngestEvent(c *gin.Context) + IngestAppMetrics(c *gin.Context) + FetchCruise(appVersionName string, osVersion string, appVersionCode string, index string, deviceId, appOs string) (*es.ESResponse, error) + GetFpsForDevice(deviceInfo ingester.DeviceAndNetworkAttributes) int +} diff --git a/alfred/cmd/ingester/app/service/interfaces/web_client.go b/alfred/cmd/ingester/app/service/interfaces/web_client.go new file mode 100644 index 0000000..d1f1702 --- /dev/null +++ b/alfred/cmd/ingester/app/service/interfaces/web_client.go @@ -0,0 +1,7 @@ +package interfaces + +import "github.com/gin-gonic/gin" + +type WebClient interface { + IngestWebSession(c *gin.Context, webClientName string) +} diff --git a/alfred/cmd/ingester/app/service/navi_user_app.go b/alfred/cmd/ingester/app/service/navi_user_app.go new file mode 100644 index 0000000..934f1cc --- /dev/null +++ b/alfred/cmd/ingester/app/service/navi_user_app.go @@ -0,0 +1,185 @@ +package service + +import ( + "alfred/cmd/ingester/app/adapter/navi_adapter" + "alfred/cmd/ingester/app/helper" + "alfred/cmd/ingester/app/model/navi" + validator "alfred/cmd/ingester/app/validation" + "alfred/config" + "alfred/internal/clients" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/cache" + kafka "alfred/pkg/kafka/produce" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "github.com/gin-gonic/gin" + "net/http" + "strings" + "time" +) + +type NaviUserApp struct { + producer kafka.KProducer + commonValidator validator.CommonValidationStrategy + naviAppEventAdapter navi_adapter.NaviAppEventAdapter + 
naviSessionUploadAdapter navi_adapter.NaviSessionUploadRequestAdapter + cruiseControlAccessLayer repositoryAccessLayer.CruiseControlAccessLayer + litmusProxyClient clients.LitmusProxyClient + cruiseControlService CruiseControlService + cacheClient cache.ConfigClientInterface +} + +func NewNaviUserAppHandler(producer kafka.KProducer, repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient, cache cache.ConfigClientInterface) *NaviUserApp { + return &NaviUserApp{ + producer: producer, + commonValidator: validator.CommonValidationStrategyImpl{}, + cruiseControlAccessLayer: repositories.CruiseControlAccessLayer, + litmusProxyClient: &clients.LitmusProxyClientImpl{HttpClient: httpClient.HttpClient}, + cacheClient: cache, + cruiseControlService: NewCruiseControlService(repositories), + } +} + +func (na *NaviUserApp) FetchCruise(appVersionName string, osVersion string, appVersionCode string, index string, deviceId, appOs string) (*es.ESResponse, error) { + var values *es.ESResponse + if appOs == utils.IOS_OS && !na.litmusProxyClient.GetLitmusExperimentOutput(deviceId, config.GetIngesterConfig().OutboundServiceConfig.LitmusAlfredExperimentMap[utils.NAVI_USER_APP_IOS_EXPERIMENT]) { + return nil, nil + } + cacheKey := helper.GenerateCacheKey(appVersionName, appOs, utils.NAVI_USER_APP) + valueInCache, found := na.cacheClient.Get(cacheKey) + if found { + values = valueInCache.(*es.ESResponse) + } else { + //cache miss + valuesFromDb, err := na.cruiseControlAccessLayer.FetchCruiseControlConfig(appVersionName, appOs, index) + if err != nil { + return nil, err + } + if valuesFromDb.Hits.Total.Value == 0 { + valuesFromDb, err = na.cruiseControlService.FetchPreviousCruise(appVersionName, osVersion, appVersionCode, appOs, index) + if err != nil { + return nil, err + } + } + + na.cacheClient.PutWithTtl(cacheKey, valuesFromDb, config.GetIngesterConfig().CruiseCacheTtl*time.Minute) + values = valuesFromDb + } + return values, nil +} + +func (na 
*NaviUserApp) GetFpsForDevice(deviceInfo ingester.DeviceAndNetworkAttributes) int { + thresholdScore := config.GetIngesterConfig().FpsThresholdScoreMap + weightMap := config.GetIngesterConfig().FpsWeightMapNavi + batteryMap := config.GetIngesterConfig().FpsBatteryMapNavi + memoryMap := config.GetIngesterConfig().FpsMemoryMapNavi + + wBatteryScore, wMemoryScore := helper.WeightedAlgorithm(deviceInfo, weightMap, batteryMap, memoryMap) + + score := wBatteryScore + wMemoryScore + + if score > thresholdScore[utils.NAVI_USER_APP].(float64) && (strings.ToLower(deviceInfo.NetworkType) == utils.FIVE_G || strings.ToLower(deviceInfo.NetworkType) == utils.FOUR_G || strings.ToLower(deviceInfo.NetworkType) == utils.WIFI) && + wBatteryScore > 0 && wMemoryScore > 0 && + deviceInfo.NetworkStrength/1024 > weightMap[utils.NETWORK_STRENGTH].(float64) { + isExperimentEnabled := na.litmusProxyClient.GetLitmusExperimentOutput(deviceInfo.DeviceId, config.GetIngesterConfig().OutboundServiceConfig.LitmusAlfredExperimentMap[utils.FPS_EXPERIMENT]) + if isExperimentEnabled { + return utils.RECORDING_2_FPS + } + return utils.DEFAULT_RECORDING_FPS + } + return utils.DEFAULT_RECORDING_FPS +} + +func (na *NaviUserApp) IngestSession(c *gin.Context) { + + var requestBody navi.SessionUploadRequest + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + // to map device id for ios + if requestBody.BaseAttributes.DeviceId == utils.EMPTY { + requestBody.BaseAttributes.DeviceId = c.GetHeader(utils.DEVICE_ID) + } + + mappedRequest, err := na.adaptSessionUploadRequest(requestBody) + if err != nil { + log.Error("Error while adapting the request Body of navi app session upload") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + if err := na.validateRequest(c, mappedRequest); err != nil { + return + } + + err = 
na.producer.SendMessage(mappedRequest, config.GetIngesterConfig().KafkaConfig.SessionUploadTopicClientMap[utils.NAVI_USER_APP], mappedRequest.BaseAttributes.DeviceId, utils.NAVI_USER_APP) + if err != nil { + log.Error("Error while publishing navi app session to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } +} + +func (na *NaviUserApp) IngestEvent(c *gin.Context) { + + var requestBody navi.AppEvent + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return + } + + //Todo add validator for ingesting events + + mappedRequest, err := na.adaptAppEventRequest(requestBody) + if err != nil { + log.Error("Error while adapting the request body of navi app events ingestion") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err = na.producer.SendMessage(mappedRequest, config.GetIngesterConfig().KafkaConfig.EventIngestionTopicClientMap[utils.NAVI_USER_APP], mappedRequest.BaseAttributes.DeviceId, utils.NAVI_USER_APP) + + if err != nil { + log.Error("Error while publishing navi app event to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + } + +} + +func (na *NaviUserApp) IngestAppMetrics(c *gin.Context) { + var appMetrics ingester.AppMetrics + if err := c.ShouldBindJSON(&appMetrics); err != nil { + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) + return + } + + err := na.producer.SendMessage(appMetrics, config.GetIngesterConfig().KafkaConfig.MetricsIngestionTopicClientMap[utils.NAVI_USER_APP], appMetrics.BaseAttributes.DeviceId, utils.NAVI_USER_APP) + if err != nil { + log.Error("Error while publishing navi app metrics to kafka") + c.JSON(http.StatusInternalServerError, utils.ErrorResponse(err, http.StatusInternalServerError, nil)) 
+ } +} + +func (na *NaviUserApp) validateRequest(c *gin.Context, request ingester.SessionUploadRequest) error { + if err := na.commonValidator.Validate(c, request); err != nil { + return err + } + return nil +} + +func (na *NaviUserApp) adaptSessionUploadRequest(requestBody navi.SessionUploadRequest) (ingester.SessionUploadRequest, error) { + na.naviSessionUploadAdapter = navi_adapter.NaviSessionUploadRequestAdapterImpl{ + NaviSessionUploadRequest: requestBody, + } + return na.naviSessionUploadAdapter.AdaptSessionUploadRequestNavi() +} + +func (na *NaviUserApp) adaptAppEventRequest(requestBody navi.AppEvent) (ingester.AppEvent, error) { + na.naviAppEventAdapter = navi_adapter.NaviAppEventAdapterImpl{ + NaviAppEvent: requestBody, + } + return na.naviAppEventAdapter.AdaptAppEventRequestNavi() +} diff --git a/alfred/cmd/ingester/app/service/navi_user_app_test.go b/alfred/cmd/ingester/app/service/navi_user_app_test.go new file mode 100644 index 0000000..3075fc9 --- /dev/null +++ b/alfred/cmd/ingester/app/service/navi_user_app_test.go @@ -0,0 +1,219 @@ +package service + +import ( + "alfred/cmd/ingester/app/model/navi" + "alfred/mocks" + "alfred/model/ingester" + "alfred/pkg/log" + "bytes" + "encoding/json" + "errors" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +func TestNaviApp_IngestSession(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + t.Run("Invalid Request Body", func(t *testing.T) { + naviApp.IngestSession(c) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + t.Run("Common Validation Failed", func(t *testing.T) { + + validData := navi.SessionUploadRequest{} + 
validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + naviApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + }) + +} + +func TestNaviApp_ingestSession_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + validData := navi.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + naviApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestNaviApp_ingestSession_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + validData := navi.SessionUploadRequest{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + 
mockCommonValidationStrategy.On("Validate", mock.Anything, mock.Anything).Return(nil) + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + naviApp.IngestSession(c) + + mockCommonValidationStrategy.AssertExpectations(t) + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestNaviApp_IngestEvent(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + t.Run("Invalid Request Body", func(t *testing.T) { + naviApp.IngestEvent(c) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + +} + +func TestNaviApp_ingestEvent_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + naviApp := NaviUserApp{producer: mockKProducer} + + validData := navi.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + naviApp.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestNaviApp_ingestEvent_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + naviApp := NaviUserApp{producer: mockKProducer} + + validData := navi.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + 
mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + naviApp.IngestEvent(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestNaviApp_ingestAppMetrics_FailJson(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + naviApp.IngestAppMetrics(c) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestNaviApp_ingestAppMetrics_FailKafka(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + mockCommonValidationStrategy := new(mocks.MockCommonValidationStrategy) + naviApp := NaviUserApp{producer: mockKProducer, commonValidator: mockCommonValidationStrategy} + + validData := ingester.AppMetrics{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("mocked Error")) + + naviApp.IngestAppMetrics(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusInternalServerError, w.Code) +} + +func TestNaviApp_ingestAppMetrics_Pass(t *testing.T) { + log.InitLogger() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + mockKProducer := new(mocks.MockKProducer) + naviApp := NaviUserApp{producer: mockKProducer} + + validData := navi.AppEvent{} + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + + mockKProducer.On("SendMessage", mock.Anything, 
mock.Anything, mock.Anything, mock.Anything).Return(nil) + + naviApp.IngestAppMetrics(c) + + mockKProducer.AssertExpectations(t) + + assert.Equal(t, http.StatusOK, w.Code) +} diff --git a/alfred/cmd/ingester/app/tracer/logger.go b/alfred/cmd/ingester/app/tracer/logger.go new file mode 100644 index 0000000..d0286fd --- /dev/null +++ b/alfred/cmd/ingester/app/tracer/logger.go @@ -0,0 +1,20 @@ +package tracer + +import ( + "alfred/pkg/log" + "go.uber.org/zap" +) + +type logger struct{} + +func (logger logger) Info(message string) { + log.GetLogger().Info(message) +} + +func (logger logger) Warn(message string) { + log.GetLogger().Warn(message) +} + +func (logger logger) Error(message string, err error) { + log.GetLogger().Error(message, zap.Error(err)) +} diff --git a/alfred/cmd/ingester/app/tracer/tracer.go b/alfred/cmd/ingester/app/tracer/tracer.go new file mode 100644 index 0000000..a3737e9 --- /dev/null +++ b/alfred/cmd/ingester/app/tracer/tracer.go @@ -0,0 +1,25 @@ +package tracer + +import ( + "alfred/config" + "context" + gotracer "github.com/navi-commons/go-tracer" + gotracerconfig "github.com/navi-commons/go-tracer/component" + "google.golang.org/grpc/credentials/insecure" +) + +func InitTracer(ctx context.Context, serviceName string) { + if !config.GetIngesterConfig().TracerConfig.APMEnabled { + return + } + + cfg := gotracerconfig.TracerConfig{ + ServiceName: serviceName, + Host: config.GetIngesterConfig().TracerConfig.Url, + Credentials: insecure.NewCredentials(), + ContainerName: serviceName, + K8sDeploymentName: serviceName, + K8sNamespace: config.GetIngesterConfig().BaseConfig.Env, + } + gotracer.InitTracer(ctx, cfg, logger{}) +} diff --git a/alfred/cmd/ingester/app/validation/APIKeyValidator.go b/alfred/cmd/ingester/app/validation/APIKeyValidator.go new file mode 100644 index 0000000..2def4aa --- /dev/null +++ b/alfred/cmd/ingester/app/validation/APIKeyValidator.go @@ -0,0 +1,30 @@ +package validation + +import ( + "alfred/config" + "alfred/utils" + 
"errors" + "github.com/gin-gonic/gin" +) + +type HeadersValidator interface { + ValidateHeaders(c *gin.Context) (string, error) +} + +type HeadersValidatorImpl struct{} + +func (av *HeadersValidatorImpl) ValidateHeaders(c *gin.Context) (string, error) { + apiKey := c.Request.Header.Get(utils.X_API_KEY) + apiKeyClientMap := config.GetIngesterConfig().ApiKeyConfig + + value, found := apiKeyClientMap[apiKey] + if !found { + return utils.EMPTY, errors.New(utils.INVALID_CLIENT) + } + clientName, err := utils.GetStringValue(value) + if err != nil { + return utils.EMPTY, errors.New("client name could not be parsed") + } + + return clientName, nil +} diff --git a/alfred/cmd/ingester/app/validation/APIKeyValidator_test.go b/alfred/cmd/ingester/app/validation/APIKeyValidator_test.go new file mode 100644 index 0000000..eb20e02 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/APIKeyValidator_test.go @@ -0,0 +1,55 @@ +package validation + +import ( + "alfred/config" + "alfred/pkg/log" + "alfred/utils" + "github.com/gin-gonic/gin" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestApiKey_ValidateHeaders(t *testing.T) { + log.InitLogger() + + clientMap := "{\"apiKey1\":\"CosmosApp\",\"apiKey2\":\"NaviUserApp\"}" + t.Setenv("API_KEY_CLIENT_MAP", clientMap) + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + av := HeadersValidatorImpl{} + + t.Run("apikey not found", func(t *testing.T) { + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/test", nil) + req.Header.Set(utils.X_API_KEY, "apiKey") + c, _ := gin.CreateTestContext(w) + c.Request = req + + client, err := av.ValidateHeaders(c) + + assert.Error(t, err) + assert.Equal(t, utils.EMPTY, client) + assert.Equal(t, utils.INVALID_CLIENT, err.Error()) + }) + + t.Run("Pass", func(t *testing.T) { + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", 
"/test", nil) + req.Header.Set(utils.X_API_KEY, "apiKey2") + c, _ := gin.CreateTestContext(w) + c.Request = req + + client, err := av.ValidateHeaders(c) + + assert.NoError(t, err) + assert.Equal(t, utils.NAVI_USER_APP, client) + }) +} diff --git a/alfred/cmd/ingester/app/validation/CommonValidationStrategy.go b/alfred/cmd/ingester/app/validation/CommonValidationStrategy.go new file mode 100644 index 0000000..0aba35f --- /dev/null +++ b/alfred/cmd/ingester/app/validation/CommonValidationStrategy.go @@ -0,0 +1,88 @@ +package validation + +import ( + "alfred/config" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/utils" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "net/http" +) + +type CommonValidationStrategy interface { + Validate(c *gin.Context, request ingester.SessionUploadRequest) error + ValidateWebSession(c *gin.Context, request ingester.WebSessionUploadRequest) error +} + +// enable timestamp logs when it is fixed from android +type CommonValidationStrategyImpl struct{} + +func (s CommonValidationStrategyImpl) Validate(c *gin.Context, request ingester.SessionUploadRequest) error { + + isValidSuffix := utils.ValidateId(request.BaseAttributes.SessionId, utils.SESSION_SUFFIX) && + utils.ValidateZipName(request.SessionUploadEventAttributes.EventId, utils.EVENT_SUFFIX) + isValidPhoneNumber := utils.ValidatePhoneNumber(request.BaseAttributes.PhoneNumber) + allowedFutureTimestamp := config.GetIngesterConfig().FutureTimestampValidationDiffInHours + allowedPastTimestamp := config.GetIngesterConfig().PastTimestampValidationDiffInHours + isValidTimestamp := utils.ValidatePresentTime(request.BaseAttributes.ClientTs, allowedPastTimestamp, allowedFutureTimestamp) && utils.ValidatePresentTime(request.BaseAttributes.EventEndTimeStamp, allowedPastTimestamp, allowedFutureTimestamp) + isPresentRequestBody := !(request.BaseAttributes.SessionId == utils.EMPTY || request.BaseAttributes.ClientTs == 0 || request.SessionUploadEventAttributes.EventId == 
utils.EMPTY) + + if !isPresentRequestBody { + log.Error("Field Missing in Request Body", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + + if !isValidSuffix { + log.Error("ID mismatch", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + + if !isValidTimestamp { + //log.Error("TimeStamp validation failed", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + + if !isValidPhoneNumber { + log.Error("Phone Number validation failed", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + return nil +} + +func (s CommonValidationStrategyImpl) ValidateWebSession(c *gin.Context, request ingester.WebSessionUploadRequest) error { + isValid := utils.ValidateId(request.BaseAttributes.SessionId, utils.WEB_SESSION_SUFFIX) + allowedFutureTimestamp := config.GetIngesterConfig().FutureTimestampValidationDiffInHours + allowedPastTimestamp := config.GetIngesterConfig().PastTimestampValidationDiffInHours + isValidTimestamp := utils.ValidatePresentTime(request.BaseAttributes.ClientTimestamp, allowedPastTimestamp, allowedFutureTimestamp) + isPresentRequestBody := !(request.BaseAttributes.SessionId == utils.EMPTY || request.BaseAttributes.ClientTimestamp == 0) + if !isPresentRequestBody { + log.Error("request Body fields missing", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_WEB_SESSION] bad request exception") + 
c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + if !isValid { + log.Error("ID mismatch", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_WEB_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + if !isValidTimestamp { + //log.Error("timeStamp validation failed", zap.String("body", fmt.Sprintf("%v", request))) + err := errors.New("[INGEST_WEB_SESSION] bad request exception") + c.JSON(http.StatusBadRequest, utils.ErrorResponse(err, http.StatusBadRequest, nil)) + return err + } + return nil +} diff --git a/alfred/cmd/ingester/app/validation/CommonValidationStrategy_test.go b/alfred/cmd/ingester/app/validation/CommonValidationStrategy_test.go new file mode 100644 index 0000000..6f6ca59 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/CommonValidationStrategy_test.go @@ -0,0 +1,198 @@ +package validation + +import ( + "alfred/config" + "alfred/model/ingester" + "alfred/pkg/log" + "errors" + "github.com/google/uuid" + "github.com/spf13/viper" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" +) + +func TestCommonValidationStrategy_Validate(t *testing.T) { + log.InitLogger() + + t.Setenv("REQUEST_BODY_FUTURE_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + t.Setenv("REQUEST_BODY_PAST_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + commonValidationStrategy := CommonValidationStrategyImpl{} + + t.Run("Invalid Request", func(t *testing.T) { + invalidRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: "invalid_session_id", + }, + SessionUploadEventAttributes: 
ingester.SessionUploadEventAttributes{ + EventId: "invalid_event_id", + }, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.Validate(c, invalidRequest) + assert.NotNil(t, err) + assert.EqualError(t, err, "[INGEST_SESSION] bad request exception") + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Invalid Request Client_TS", func(t *testing.T) { + ist, _ := time.LoadLocation("Asia/Kolkata") + invalidRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: "ALFRED_SESSION_ID", + ClientTs: time.Now().In(ist).UnixMilli() + 48*time.Hour.Milliseconds(), + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: "ALFRED_EVENT_ID", + }, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.Validate(c, invalidRequest) + assert.NotNil(t, err) + assert.EqualError(t, err, "[INGEST_SESSION] bad request exception") + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Invalid Phone Number", func(t *testing.T) { + testId, _ := uuid.NewUUID() + ist, _ := time.LoadLocation("Asia/Kolkata") + invalidRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: testId.String() + "ALFRED_SESSION_ID", + ClientTs: time.Now().In(ist).UnixMilli(), + EventEndTimeStamp: time.Now().In(ist).UnixMilli(), + PhoneNumber: "1234567aa9", + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: testId.String() + "ALFRED_EVENT_ID", + }, + } + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.Validate(c, invalidRequest) + assert.NotNil(t, err) + assert.EqualError(t, err, errors.New("[INGEST_SESSION] bad request exception").Error()) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Invalid Request missing request body fields", func(t *testing.T) { + invalidRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: 
"ALFRED_SESSION_ID", + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: "ALFRED_EVENT_ID", + }, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.Validate(c, invalidRequest) + assert.NotNil(t, err) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Valid Request", func(t *testing.T) { + testId, _ := uuid.NewUUID() + ist, _ := time.LoadLocation("Asia/Kolkata") + validRequest := ingester.SessionUploadRequest{ + BaseAttributes: ingester.BaseAttributes{ + SessionId: testId.String() + "ALFRED_SESSION_ID", + PhoneNumber: "1234567890", + ClientTs: time.Now().In(ist).UnixMilli(), + EventEndTimeStamp: time.Now().In(ist).UnixMilli(), + }, + SessionUploadEventAttributes: ingester.SessionUploadEventAttributes{ + EventId: testId.String() + "ALFRED_EVENT_ID", + }, + } + + err := commonValidationStrategy.Validate(c, validRequest) + assert.Nil(t, err) + }) +} + +func TestCommonValidationStrategy_ValidateWebSession(t *testing.T) { + log.InitLogger() + + t.Setenv("REQUEST_BODY_FUTURE_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + t.Setenv("REQUEST_BODY_PAST_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + commonValidationStrategy := CommonValidationStrategyImpl{} + + t.Run("Invalid Request", func(t *testing.T) { + invalidRequest := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: "invalid_session_id", + ClientTimestamp: 1, + }, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.ValidateWebSession(c, invalidRequest) + assert.NotNil(t, err) + assert.EqualError(t, err, "[INGEST_WEB_SESSION] bad request exception") + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Invalid Request timestamp", func(t *testing.T) { + ist, _ := 
time.LoadLocation("Asia/Kolkata") + testId, _ := uuid.NewUUID() + invalidRequest := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: testId.String() + "", + ClientTimestamp: time.Now().In(ist).UnixMilli() + 48*time.Hour.Milliseconds(), + }, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.ValidateWebSession(c, invalidRequest) + assert.NotNil(t, err) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Invalid Request body fields missing", func(t *testing.T) { + invalidRequest := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{}, + } + + c.JSON(http.StatusBadRequest, nil) + err := commonValidationStrategy.ValidateWebSession(c, invalidRequest) + assert.NotNil(t, err) + assert.Equal(t, http.StatusBadRequest, w.Code) + }) + + t.Run("Valid Request", func(t *testing.T) { + testId, _ := uuid.NewUUID() + ist, _ := time.LoadLocation("Asia/Kolkata") + validRequest := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: testId.String() + "", + ClientTimestamp: time.Now().In(ist).UnixMilli(), + }, + } + + err := commonValidationStrategy.ValidateWebSession(c, validRequest) + assert.Nil(t, err) + }) +} diff --git a/alfred/cmd/ingester/app/validation/CosmosValidationStrategy.go b/alfred/cmd/ingester/app/validation/CosmosValidationStrategy.go new file mode 100644 index 0000000..10d21af --- /dev/null +++ b/alfred/cmd/ingester/app/validation/CosmosValidationStrategy.go @@ -0,0 +1,43 @@ +package validation + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "alfred/config" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" +) + +type CosmosValidationStrategy interface { + Validate(c *gin.Context, adapter cosmos.SessionUploadRequest) error +} + +type CosmosValidationStrategyImpl struct{} + +func (s *CosmosValidationStrategyImpl) Validate(c *gin.Context, adapter cosmos.SessionUploadRequest) error { + + if 
err := validateMetadata(adapter.BaseAttributes.Metadata); err != nil { + return err + } + return nil + +} + +func validateMetadata(metadata map[string]string) error { + if metadata == nil || len(metadata) == 0 { + return nil + } + + whitelistedTags := config.GetIngesterConfig().WhiteListedTags.CosmosTags + + for key := range metadata { + if !utils.Contains(whitelistedTags, key) { + log.Error("Tags are not whitelisted") + err := errors.New("metadata key " + key + " is not allowed") + return err + } + } + + return nil +} diff --git a/alfred/cmd/ingester/app/validation/CosmosValidationStrategy_test.go b/alfred/cmd/ingester/app/validation/CosmosValidationStrategy_test.go new file mode 100644 index 0000000..5c1d2dc --- /dev/null +++ b/alfred/cmd/ingester/app/validation/CosmosValidationStrategy_test.go @@ -0,0 +1,90 @@ +package validation + +import ( + "alfred/cmd/ingester/app/model/cosmos" + "alfred/pkg/log" + "alfred/utils" + "errors" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCosmosValidationStrategy_Validate(t *testing.T) { + + log.InitLogger() + + ctx := &gin.Context{} + + tests := []struct { + name string + request cosmos.SessionUploadRequest + expected error + }{ + { + name: "InvalidMetadata", + request: cosmos.SessionUploadRequest{ + BaseAttributes: cosmos.BaseAttributes{ + Metadata: map[string]string{"disallowed_key": "value1"}, + }, + }, + expected: errors.New("metadata key disallowed_key is not allowed"), + }, + { + name: "EmptyMetadata", + request: cosmos.SessionUploadRequest{}, + expected: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Create a CosmosValidationStrategyImpl instance (you might need to set up other dependencies as well) + validator := CosmosValidationStrategyImpl{} + + // Call the Validate method + err := validator.Validate(ctx, test.request) + + // Assert the result + assert.Equal(t, test.expected, err) + }) + } +} + +func TestValidateMetadata(t 
*testing.T) { + + log.InitLogger() + + tests := []struct { + name string + metadata map[string]string + expected error + }{ + { + name: "InvalidMetadata", + metadata: map[string]string{"disallowed_key": "value1"}, + expected: errors.New("metadata key disallowed_key is not allowed"), + }, + { + name: "EmptyMetadata", + metadata: nil, + expected: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := validateMetadata(test.metadata) + assert.Equal(t, test.expected, err) + }) + } +} + +func TestContains(t *testing.T) { + slice := []string{"foo", "bar", "baz"} + + assert.True(t, utils.Contains(slice, "foo")) + assert.True(t, utils.Contains(slice, "bar")) + assert.True(t, utils.Contains(slice, "baz")) + assert.False(t, utils.Contains(slice, "qux")) +} diff --git a/alfred/cmd/ingester/app/validation/RateLimiter.go b/alfred/cmd/ingester/app/validation/RateLimiter.go new file mode 100644 index 0000000..8f23c68 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/RateLimiter.go @@ -0,0 +1,101 @@ +package validation + +import ( + "alfred/config" + "alfred/utils" + "fmt" + "github.com/gin-gonic/gin" + "github.com/ulule/limiter/v3" + mgin "github.com/ulule/limiter/v3/drivers/middleware/gin" + "github.com/ulule/limiter/v3/drivers/store/memory" + "strings" + "time" +) + +// globalRate defines the default rate limit for routes that don't have a specific configuration. +var globalRate = limiter.Rate{ + Period: config.GetIngesterConfig().RateLimiterConfig.RateLimiterConfigGlobalPeriod * time.Minute, + Limit: config.GetIngesterConfig().RateLimiterConfig.RateLimiterConfigGlobalLimit, +} + +// routeNameMap maps API routes to route names for configuration lookup. +var routeNameMap = map[string]string{ + utils.WEB_SESSIONS_V2: utils.WEB_SESSIONS_V2_VALUE, + utils.CRUISE_CONTROL: utils.CRUISE_CONTROL_VALUE, + utils.CRUISE_CONTROL_V2: utils.CRUISE_CONTROL_V2_VALUE, +} + +// parseRate converts a rate limit string to a limiter.Rate struct. 
+func parseRate(rateStr string) (limiter.Rate, error) { + parts := strings.Split(rateStr, utils.HYPHEN) + if len(parts) != 2 { + return limiter.Rate{}, fmt.Errorf("invalid rate format: %s", rateStr) + } + + limit, err := limiter.NewRateFromFormatted(rateStr) + if err != nil { + return limiter.Rate{}, err + } + return limit, nil +} + +// retrieveRateConfig fetches the rate configuration for a route. +func retrieveRateConfig(mode, routeName string) (limiter.Rate, error) { + + rateConfig := config.GetIngesterConfig().RateLimiterConfig.RateLimiterConfig + + rateStr, exists := rateConfig[mode+utils.COLON+routeNameMap[routeName]] + if !exists { + return limiter.Rate{}, fmt.Errorf("rate configuration not found for mode: %s, routeName: %s", mode, routeName) + } + + return parseRate(rateStr) +} + +// RateControl is a middleware to handle rate limiting. +func RateControl(routeName string) gin.HandlerFunc { + // Determine the route name dynamically. + + mode := "default" // Replace this with your actual mode retrieval logic. + + // Retrieve the rate configuration or use a global rate as fallback. + rate, err := retrieveRateConfig(mode, routeName) + if err != nil { + rate = globalRate + } + + options := &limiter.StoreOptions{ + Prefix: mode + utils.COLON + routeName + utils.COLON, + CleanUpInterval: config.GetIngesterConfig().RateLimiterConfig.RateLimiterCleanUpInterval * time.Minute, + } + + // Create a rate limiter based on the route name and mode. + storeWithPrefix := memory.NewStoreWithOptions( + *options, + ) + rateLimiter := limiter.New(storeWithPrefix, rate) + + // Apply the rate limiter middleware. 
+ var keyGetter func(c *gin.Context) string + + switch routeName { + case utils.CRUISE_CONTROL: + keyGetter = keyGetterOptionApp + case utils.CRUISE_CONTROL_V2: + keyGetter = keyGetterOptionApp + case utils.WEB_SESSIONS_V2: + keyGetter = keyGetterOption + } + + op := mgin.WithKeyGetter(keyGetter) + + return mgin.NewMiddleware(rateLimiter, op) +} + +func keyGetterOption(c *gin.Context) string { + return c.ClientIP() + utils.COLON + c.Query(utils.PROJECT_NAME) +} + +func keyGetterOptionApp(c *gin.Context) string { + return c.ClientIP() + utils.COLON + c.GetHeader(utils.DEVICE_ID) +} diff --git a/alfred/cmd/ingester/app/validation/RateLimiter_test.go b/alfred/cmd/ingester/app/validation/RateLimiter_test.go new file mode 100644 index 0000000..53330c5 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/RateLimiter_test.go @@ -0,0 +1,122 @@ +package validation + +import ( + "alfred/config" + "alfred/utils" + "github.com/gin-gonic/gin" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/ulule/limiter/v3" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +// Test RateControl middleware +func TestRateControlMiddleware(t *testing.T) { + // Create a test Gin router and use the RateControl middleware + t.Setenv("RATE_LIMIT_CONFIG", "{\"default:webSession\":\"2-M\",\"default:webSessionV2\":\"100000-M\",\"default:cruise\":\"10-M\",\"default:cruiseV2\":\"10-M\"}") + t.Setenv("RATE_LIMIT_CLEAN_UP_INTERVAL", "2") + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + router := gin.New() + router.Use(RateControl(utils.WEB_SESSIONS)) + + // Define a test route handler + router.GET("/test", func(c *gin.Context) { + c.JSON(200, gin.H{"message": "success"}) + }) + + // Create HTTP request and recorder for testing + req := createTestRequest("GET", "/test") + recorder := createTestResponseRecorder() + + // Test rate limiting + for i := 0; i < 5; i++ { + 
router.ServeHTTP(recorder, req) + } + assert.Equal(t, http.StatusOK, recorder.Code) + +} + +func TestRateControlMiddleware_Fail(t *testing.T) { + // Create a test Gin router and use the RateControl middleware + t.Setenv("RATE_LIMIT_CONFIG", "{\"default:webSession\":\"2-M\",\"default:webSessionV2\":\"100000-M\",\"default:cruise\":\"10-M\",\"default:cruiseV2\":\"10-M\"}") + t.Setenv("RATE_LIMIT_CLEAN_UP_INTERVAL", "2") + t.Setenv("RATE_LIMIT_GLOBAL_CONFIG_LIMIT", "100") + t.Setenv("RATE_LIMIT_GLOBAL_CONFIG_PERIOD", "1") + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + router := gin.New() + router.Use(RateControl(utils.WEB_SESSIONS)) + + // Define a test route handler + router.GET("/test", func(c *gin.Context) { + c.JSON(429, gin.H{"message": "success"}) + }) + + // Create HTTP request and recorder for testing + req := createTestRequest("GET", "/test") + req.Header.Set("X-Real-IP", "192.168.0.1") + + recorder := createTestResponseRecorder() + + // Test rate limiting + for i := 0; i < 10005; i++ { + router.ServeHTTP(recorder, req) + } + assert.Equal(t, http.StatusTooManyRequests, recorder.Code) + +} + +func TestParseRate(t *testing.T) { + tests := []struct { + input string + expected limiter.Rate + expectErr bool + errMessage string + }{ + // Valid input + {"10-S", limiter.Rate{Formatted: "10-S", Period: time.Second, Limit: 10}, false, ""}, + {"5-M", limiter.Rate{Formatted: "5-M", Period: time.Minute, Limit: 5}, false, ""}, + + // Invalid input + {"invalid", limiter.Rate{}, true, "invalid rate format: invalid"}, + {"3", limiter.Rate{}, true, "invalid rate format: 3"}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + rate, err := parseRate(test.input) + if test.expectErr { + assert.Error(t, err) + assert.EqualError(t, err, test.errMessage) + assert.Equal(t, limiter.Rate{}, rate) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, rate) + } + }) + } +} + 
+// Helper function to create a test HTTP request +func createTestRequest(method, path string) *http.Request { + req, _ := http.NewRequest(method, path, nil) + return req +} + +// Helper function to create a test HTTP response recorder +func createTestResponseRecorder() *httptest.ResponseRecorder { + return httptest.NewRecorder() +} diff --git a/alfred/cmd/ingester/app/validation/ValidationStrategy.go b/alfred/cmd/ingester/app/validation/ValidationStrategy.go new file mode 100644 index 0000000..e703f90 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/ValidationStrategy.go @@ -0,0 +1,10 @@ +package validation + +import ( + "alfred/model/ingester" + "github.com/gin-gonic/gin" +) + +type ValidationStrategy interface { + Validate(c *gin.Context, request ingester.SessionUploadRequest) error +} diff --git a/alfred/cmd/ingester/app/validation/WebClientValidator.go b/alfred/cmd/ingester/app/validation/WebClientValidator.go new file mode 100644 index 0000000..688afad --- /dev/null +++ b/alfred/cmd/ingester/app/validation/WebClientValidator.go @@ -0,0 +1,45 @@ +package validation + +import ( + "alfred/config" + "alfred/model/ingester" + "alfred/utils" + "bytes" + "encoding/json" + "errors" + "github.com/gin-gonic/gin" + "io" +) + +type WebClientValidator interface { + ValidateWebClientUsingProjectName(c *gin.Context) (string, error) +} + +type WebClientValidatorImpl struct { + CommonValidator CommonValidationStrategy +} + +func (wcv *WebClientValidatorImpl) ValidateWebClientUsingProjectName(c *gin.Context) (string, error) { + data, err := io.ReadAll(c.Request.Body) + if err != nil { + return utils.EMPTY, errors.New("error reading web request body") + } + + c.Request.Body = io.NopCloser(bytes.NewReader(data)) + var request ingester.WebSessionUploadRequest + if err := json.Unmarshal(data, &request); err != nil { + return utils.EMPTY, errors.New("error unmarshalling web request body") + } + + webClient := request.BaseAttributes.ProjectName + validWebClients := 
config.GetIngesterConfig().WebWhitelistedClientsConfig + if !utils.Contains(validWebClients, webClient) { + return utils.EMPTY, errors.New(utils.INVALID_WEB_CLIENT) + } + + if err := wcv.CommonValidator.ValidateWebSession(c, request); err != nil { + return utils.EMPTY, err + } + + return webClient, nil +} diff --git a/alfred/cmd/ingester/app/validation/WebClientValidator_test.go b/alfred/cmd/ingester/app/validation/WebClientValidator_test.go new file mode 100644 index 0000000..99127d4 --- /dev/null +++ b/alfred/cmd/ingester/app/validation/WebClientValidator_test.go @@ -0,0 +1,105 @@ +package validation + +import ( + "alfred/config" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/utils" + "bytes" + "encoding/json" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestWebClientValidatorImpl_ValidateWebClientUsingProjectName(t *testing.T) { + log.InitLogger() + + t.Setenv("WHITELISTED_WEB_CLIENTS", "RAVEN,NAVI") + t.Setenv("REQUEST_BODY_FUTURE_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + t.Setenv("REQUEST_BODY_PAST_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS", "24") + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + config.InitIngesterConfig() + + wcv := WebClientValidatorImpl{CommonValidator: CommonValidationStrategyImpl{}} + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + t.Run("invalid request body", func(t *testing.T) { + validData := "" + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + webClient, err := wcv.ValidateWebClientUsingProjectName(c) + assert.NotNil(t, err) + assert.EqualError(t, err, "error unmarshalling web request body") + assert.Equal(t, webClient, utils.EMPTY) + }) + + t.Run("valid request body but client not whitelisted", 
func(t *testing.T) { + validData := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: "", + ProjectName: "Invalid", + }, + SessionAttributes: ingester.WebSessionAttributes{}, + } + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + webClient, err := wcv.ValidateWebClientUsingProjectName(c) + assert.NotNil(t, err) + assert.Equal(t, webClient, utils.EMPTY) + assert.EqualError(t, err, utils.INVALID_WEB_CLIENT) + }) + + t.Run("valid request body with client whitelisted but invalid session id", func(t *testing.T) { + validData := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: "", + ProjectName: "RAVEN", + }, + SessionAttributes: ingester.WebSessionAttributes{}, + } + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + webClient, err := wcv.ValidateWebClientUsingProjectName(c) + assert.NotNil(t, err) + assert.Equal(t, webClient, utils.EMPTY) + assert.EqualError(t, err, "[INGEST_WEB_SESSION] bad request exception") + }) + + t.Run("valid request body with client whitelisted and valid session id", func(t *testing.T) { + testId, _ := uuid.NewUUID() + validData := ingester.WebSessionUploadRequest{ + BaseAttributes: ingester.WebBaseAttributes{ + SessionId: testId.String() + "", + ProjectName: "RAVEN", + ClientTimestamp: utils.GetCurrentTimeInMillis(), + }, + SessionAttributes: ingester.WebSessionAttributes{}, + } + validJsonData, _ := json.Marshal(validData) + validRequestBody := bytes.NewBuffer(validJsonData) + + c.Request = &http.Request{Body: io.NopCloser(validRequestBody)} + webClient, err := wcv.ValidateWebClientUsingProjectName(c) + assert.Nil(t, err) + assert.NotNil(t, webClient) + assert.Equal(t, "RAVEN", webClient) + }) + +} diff --git 
a/alfred/cmd/ingester/main.go b/alfred/cmd/ingester/main.go new file mode 100644 index 0000000..6c2fec1 --- /dev/null +++ b/alfred/cmd/ingester/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "alfred/cmd/ingester/app" + "alfred/cmd/ingester/app/tracer" + "alfred/config" + "alfred/pkg/log" + "context" + ginzap "github.com/gin-contrib/zap" + "github.com/gin-gonic/gin" + "github.com/spf13/cobra" + _ "go.uber.org/automaxprocs" + "go.uber.org/zap" + "os" +) + +var serviceName = "alfred-ingester" + +func main() { + log.InitLogger(serviceName) + config.LoadIngesterConfig() + tracer.InitTracer(context.Background(), serviceName) + + command := &cobra.Command{ + Use: "alfred-ingester", + Short: "alfred ingester receive all app events and ingest them into kafka", + Long: "alfred ingester receive all app events and ingest them into kafka", + RunE: func(cmd *cobra.Command, args []string) error { + r := gin.New() + + //r.Use(ginzap.Ginzap(logger, time.RFC3339, true)) + + r.Use(ginzap.RecoveryWithZap(log.GetLogger(), true)) + + sv := app.NewServer(r) + sv.Handler() + sv.Start() + return nil + }, + } + + if err := command.Execute(); err != nil { + log.Error("alfred ingester main command execution failed", zap.Error(err)) + os.Exit(1) + } +} diff --git a/alfred/config/.gitignore b/alfred/config/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/config/application-collector.properties b/alfred/config/application-collector.properties new file mode 100644 index 0000000..5481b8c --- /dev/null +++ b/alfred/config/application-collector.properties @@ -0,0 +1,62 @@ +env=ENV +port=PORT +metrics.port=METRICS_PORT +# gin +gin.mode=GIN_MODE +# kafka config +kafka.brokers=KAFKA_BROKERS +kafka.username=KAFKA_USERNAME +kafka.password=KAFKA_PASSWORD +kafka.tls.insecureSkipVerify=KAFKA_TLS_INSECURESKIPVERIFY +kafka.tls.enabled=KAFKA_TLS_ENABLED +kafka.sasl.enabled=KAFKA_SASL_ENABLED +kafka.group.id=KAFKA_GROUP_ID +# kafka topics 
+kafka.alfred.mobile.session.upload.topic=KAFKA_ALFRED_MOBILE_SESSION_UPLOAD_TOPIC +kafka.alfred.mobile.metric.ingestion.topic=KAFKA_ALFRED_MOBILE_METRIC_INGESTION_TOPIC +kafka.alfred.mobile.event.ingestion.topic=KAFKA_ALFRED_MOBILE_EVENT_INGESTION_TOPIC +kafka.alfred.web.session.upload.topic=KAFKA_ALFRED_WEB_SESSION_UPLOAD_TOPIC +kafka.alfred.error.events.upload.topic=KAFKA_ALFRED_ERROR_EVENTS_UPLOAD_TOPIC +kafka.alfred.error.events.slack.push.topic=KAFKA_ALFRED_ERROR_EVENTS_SLACK_PUSH_TOPIC +kafka.alfred.error.events.update.topic=KAFKA_ALFRED_ERROR_EVENTS_UPDATE_TOPIC +error.events.slack.template.id=ERROR_EVENTS_SLACK_TEMPLATE_ID +# session +session.upload.listener.group.id=SESSION_UPLOAD_LISTENER_GROUP_ID +metric.upload.listener.group.id=METRIC_UPLOAD_LISTENER_GROUP_ID +event.ingestion.listener.group.id=EVENT_INGESTION_LISTENER_GROUP_ID +web.session.upload.listener.group.id=WEB_SESSION_UPLOAD_LISTENER_GROUP_ID +error.events.upload.listener.group.id=ERROR_EVENTS_UPLOAD_LISTENER_GROUP_ID +error.events.update.listener.group.id=ERROR_EVENTS_UPDATE_LISTENER_GROUP_ID +# es config +elasticsearch.nodes=ELASTICSEARCH_NODES +elasticsearch.username=ELASTICSEARCH_USERNAME +elasticsearch.password=ELASTICSEARCH_PASSWORD +elasticsearch.tls.insecureSkipVerify=ELASTICSEARCH_TLS_INSECURESKIPVERIFY +elasticsearch.max.idle.connection=ELASTICSEARCH_MAX_IDLE_CONNECTION +elasticsearch.max.connection=ELASTICSEARCH_MAX_CONNECTION +elasticsearch.idle.connection.timout=ELASTICSEARCH_IDLE_CONNECTION_TIMEOUT +# es indexes +elasticsearch.session.upload.index.client.map=ELASTICSEARCH_SESSION_UPLOAD_INDEX_CLIENT_MAP +elasticsearch.metric.ingestion.index=ELASTICSEARCH_METRIC_INGESTION_INDEX +elasticsearch.event.ingestion.index.client.map=ELASTICSEARCH_EVENT_INGESTION_INDEX_CLIENT_MAP +elasticsearch.web.session.upload.index.client.map=ELASTICSEARCH_WEB_SESSION_UPLOAD_INDEX_CLIENT_MAP +elasticsearch.fragment.ingestion.index=ELASTICSEARCH_FRAGMENT_INGESTION_INDEX 
+elasticsearch.error.events.upload.index.client.map=ELASTICSEARCH_ERROR_EVENTS_UPLOAD_INDEX_CLIENT_MAP +elasticsearch.min.web.version.supporting.single.doc=ELASTICSEARCH_MIN_WEB_VERSION_SUPPORTING_SINGLE_DOC +#s3 config +s3.web.session.upload.bucket.client.map=S3_WEB_SESSION_UPLOAD_BUCKET_CLIENT_MAP +#Cache +cache.time.fragments.ingestion=CACHE_TIME_FRAGMENTS_INGESTION +#Retry +ingest.elasticsearch.max.retry=INGEST_ELASTICSEARCH_MAX_RETRY +ingest.elasticsearch.initial.delay.in.seconds=INGEST_ELASTICSEARCH_INITIAL_DELAY_IN_SECONDS +session.error.events.filter=SESSION_ERROR_EVENTS_FILTER +ingest.error.events.filter=INGEST_ERROR_EVENTS_FILTER +#timestamp validation +request.body.future.timestamp.validation.difference.in.hours=REQUEST_BODY_FUTURE_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS +request.body.past.timestamp.validation.difference.in.hours=REQUEST_BODY_PAST_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS +ignored.event.types=IGNORED_EVENT_TYPES +#consumer +event.listener.goroutine.group.limit=EVENT_LISTENER_GOROUTINE_GROUP_LIMIT +web.session.upload.listener.goroutine.group.limit=WEB_SESSION_UPLOAD_LISTENER_GOROUTINE_GROUP_LIMIT +session.upload.listener.goroutine.group.limit=SESSION_UPLOAD_LISTENER_GOROUTINE_GROUP_LIMIT \ No newline at end of file diff --git a/alfred/config/application-core.properties b/alfred/config/application-core.properties new file mode 100644 index 0000000..1a2dbd0 --- /dev/null +++ b/alfred/config/application-core.properties @@ -0,0 +1,103 @@ +env=ENV +port=PORT +metrics.port=METRICS_PORT +# gin +gin.mode=GIN_MODE +# es +elasticsearch.nodes=ELASTICSEARCH_NODES +elasticsearch.username=ELASTICSEARCH_USERNAME +elasticsearch.password=ELASTICSEARCH_PASSWORD +elasticsearch.cruise.control.index=ELASTICSEARCH_CRUISE_CONTROL_INDEX +elasticsearch.event.ingestion.index=ELASTICSEARCH_EVENT_INGESTION_INDEX +elasticsearch.session.upload.index=ELASTICSEARCH_SESSION_UPLOAD_INDEX +elasticsearch.device.metrics.index=ELASTICSEARCH_DEVICE_METRICS_INDEX 
+elasticsearch.web.session.upload.index.client.map=ELASTICSEARCH_WEB_SESSION_UPLOAD_INDEX_CLIENT_MAP +elasticsearch.tls.insecureSkipVerify=ELASTICSEARCH_TLS_INSECURESKIPVERIFY +elasticsearch.max.idle.connection=ELASTICSEARCH_MAX_IDLE_CONNECTION +elasticsearch.max.connection=ELASTICSEARCH_MAX_CONNECTION +elasticsearch.idle.connection.timout=ELASTICSEARCH_IDLE_CONNECTION_TIMEOUT +elasticsearch.index.creation.cron.schedule=ELASTICSEARCH_INDEX_CREATION_CRON_SCHEDULE +elasticsearch.error.events.upload.index.client.map=ELASTICSEARCH_ERROR_EVENTS_UPLOAD_INDEX_CLIENT_MAP +elasticsearch.error.events.update.cron.schedule=ELASTICSEARCH_ERROR_EVENTS_UPDATE_CRON_SCHEDULE +elasticsearch.error.events.last.cron.timestamp.index=ELASTICSEARCH_ERROR_EVENTS_LAST_CRON_TIMESTAMP_INDEX +elasticsearch.shed.lock.cron.index=ELASTICSEARCH_SHED_LOCK_CRON_INDEX +elasticsearch.error.events.update.batch.size=ELASTICSEARCH_ERROR_EVENTS_UPDATE_BATCH_SIZE +# s3 +s3.session.upload.bucket=S3_SESSION_UPLOAD_BUCKET +s3.video.upload.bucket=S3_VIDEO_UPLOAD_BUCKET +s3.web.session.upload.bucket.client.map=S3_WEB_SESSION_UPLOAD_BUCKET_CLIENT_MAP +s3.min.web.version.supporting.folder.upload=S3_MIN_WEB_VERSION_SUPPORTING_FOLDER_UPLOAD +# external +customer.profile.service.url=CUSTOMER_PROFILE_SERVICE_URL +customer.federation.layer.url=CUSTOMER_FEDERATION_LAYER_URL +customer.federation.tenant.id=CUSTOMER_FEDERATION_TENANT_ID +mjolnir.service.url=MJOLNIR_SERVICE_URL +mjolnir.realm.id=MJOLNIR_REALM_ID +slack.service.url=SLACK_SERVICE_URL +data.science.service.masking.base.url=DATA_SCIENCE_SERVICE_MASKING_BASE_URL +# ffmpeg config +ffmpeg.video.generation.framerate=FFMPEG_VIDEO_GENERATION_FRAMERATE +cruise.dropdowns=CRUISE_DROPDOWNS +#http +http.max.idle.connection.pool=HTTP_MAX_IDLE_CONNECTION_POOL +http.max.connection=HTTP_MAX_CONNECTION +http.max.timeout.seconds=HTTP_MAX_TIMEOUT_SECONDS +whitelisted.domains=WHITELISTED_DOMAINS +allowed.custom.headers=ALLOWED_CUSTOM_HEADERS +auth.enabled=AUTH_ENABLED 
+source.to.bypass=SOURCE_TO_BYPASS +# filter config +filter.screen.tag.config=FILTER_SCREEN_TAG_CONFIG +zip.upload.frequency.milliseconds=ZIP_UPLOAD_FREQUENCY_MILLISECONDS +api.key.client.map=API_KEY_CLIENT_MAP +client.app.os.map=CLIENT_APP_OS_MAP +clients.without.duration.response=CLIENTS_WITHOUT_DURATION_RESPONSE +default.app.sessions.time.hours=DEFAULT_APP_SESSIONS_TIME_HOURS +ingestor.client.api.key.map=INGESTOR_CLIENT_API_KEY_MAP +alfred.ingestor.base.url=ALFRED_INGESTOR_BASE_URL +masked.screens.strategy.map=MASKED_SCREENS_STRATEGY_MAP +masked.screens.blur.ratio.map=MASKED_SCREENS_BLUR_RATIO_MAP +masking.enabled=MASKING_ENABLED +default.blur.screen.ratio=DEFAULT_BLUR_SCREEN_RATIO +default.blur.screen.strength=DEFAULT_BLUR_SCREEN_STRENGTH +buffer.time.millis.mask=BUFFER_TIME_MILLIS_MASK +masking.min.app.version.code.client.map=MASKING_MIN_APP_VERSION_CODE_CLIENT_MAP +ds.masking.screens.with.face.detection.enabled=DS_MASKING_SCREENS_WITH_FACE_DETECTION_ENABLED +masking.fallback.screens=MASKING_FALLBACK_SCREENS +#2 fps metrics monitoring +device.metrics.monitoring.cron.schedule.map=DEVICE_METRICS_MONITORING_CRON_SCHEDULE_MAP +device.metrics.monitoring.channel.id.map=DEVICE_METRICS_MONITORING_CHANNEL_ID_MAP +device.metrics.monitoring.battery.alert.map=DEVICE_METRICS_MONITORING_BATTERY_ALERT_MAP +device.metrics.monitoring.memory.alert.map=DEVICE_METRICS_MONITORING_MEMORY_ALERT_MAP +device.metrics.monitoring.slack.service.template.id=DEVICE_METRICS_MONITORING_SLACK_SERVICE_TEMPLATE_ID +device.metrics.update.cron.schedule.map=DEVICE_METRICS_UPDATE_CRON_SCHEDULE_MAP +device.metrics.total.limit.map=DEVICE_METRICS_TOTAL_LIMIT_MAP +video.generation.status.index.client.map=VIDEO_GENERATION_STATUS_INDEX_CLIENT_MAP +video.generation.events.app.versions=VIDEO_GENERATION_EVENTS_APP_VERSIONS +min.app.version.for.screenname.filter=MIN_APP_VERSION_FOR_SCREENNAME_FILTER 
+elasticsearch.error.events.update.cron.delay.time.in.Minutes=ELASTICSEARCH_ERROR_EVENTS_UPDATE_CRON_DELAY_TIME_IN_MINUTES +session.error.events.filter=SESSION_ERROR_EVENTS_FILTER +max.fetch.web.video.gofunc.concurrency=MAX_FETCH_WEB_VIDEO_GOFUNC_CONCURRENCY +max.blurring.video.gofunc.concurrency=MAX_BLURRING_VIDEO_GOFUNC_CONCURRENCY +max.update.session.error.events.gofunc.concurrency=MAX_UPDATE_SESSION_ERROR_EVENTS_GOFUNC_CONCURRENCY +elasticsearch.update.max.retry=ELASTICSEARCH_UPDATE_MAX_RETRY +elasticsearch.update.retry.backoff.seconds=ELASTICSEARCH_UPDATE_RETRY_BACKOFF_SECONDS +device.metrics.cron.locked.until=DEVICE_METRICS_CRON_LOCKED_UNTIL +session.index.creation.cron.locked.until=SESSION_INDEX_CREATION_CRON_LOCKED_UNTIL +event.index.creation.cron.locked.until=EVENT_INDEX_CREATION_CRON_LOCKED_UNTIL +error.event.update.cron.locked.until=ERROR_EVENT_UPDATE_CRON_LOCKED_UNTIL +# TouchPoints Config +touchpoints.enabled=TOUCHPOINTS_ENABLED +touchpoints.min.app.version.code.client.map=TOUCHPOINTS_MIN_APP_VERSION_CODE_CLIENT_MAP +touchpoints.circle.radius=TOUCHPOINTS_CIRCLE_RADIUS +touchpoints.circle.red.component=TOUCHPOINTS_CIRCLE_RED_COMPONENT +touchpoints.circle.green.component=TOUCHPOINTS_CIRCLE_GREEN_COMPONENT +touchpoints.circle.blue.component=TOUCHPOINTS_CIRCLE_BLUE_COMPONENT +touchpoints.alpha.component=TOUCHPOINTS_ALPHA_COMPONENT + +client.project.name.map=CLIENT_PROJECT_NAME_MAP +auth.key.service.map=AUTH_KEY_SERVICE_MAP + +# video processing +video.processing.max.concurrent.jobs=VIDEO_PROCESSING_MAX_CONCURRENT_JOBS +video.processing.default.timeout.seconds=VIDEO_PROCESSING_DEFAULT_TIMEOUT_SECONDS \ No newline at end of file diff --git a/alfred/config/application-ferret.properties b/alfred/config/application-ferret.properties new file mode 100644 index 0000000..e890152 --- /dev/null +++ b/alfred/config/application-ferret.properties @@ -0,0 +1,16 @@ +env=ENV +port=PORT +metrics.port=METRICS_PORT +# gin +gin.mode=GIN_MODE + +# kafka 
+kafka.brokers=KAFKA_BROKERS +kafka.username=KAFKA_USERNAME +kafka.password=KAFKA_PASSWORD +kafka.tls.insecureSkipVerify=KAFKA_TLS_INSECURESKIPVERIFY +kafka.tls.enabled=KAFKA_TLS_ENABLED +kafka.sasl.enabled=KAFKA_SASL_ENABLED +kafka.alfred.error.events.upload.topic=KAFKA_ALFRED_ERROR_EVENTS_UPLOAD_TOPIC + +api.key.client.map=API_KEY_CLIENT_MAP \ No newline at end of file diff --git a/alfred/config/application-ingester.properties b/alfred/config/application-ingester.properties new file mode 100644 index 0000000..6c9315c --- /dev/null +++ b/alfred/config/application-ingester.properties @@ -0,0 +1,85 @@ +env=ENV +port=PORT +metrics.port=METRICS_PORT +# gin +gin.mode=GIN_MODE + +# kafka +kafka.brokers=KAFKA_BROKERS +kafka.username=KAFKA_USERNAME +kafka.password=KAFKA_PASSWORD +kafka.tls.insecureSkipVerify=KAFKA_TLS_INSECURESKIPVERIFY +kafka.tls.enabled=KAFKA_TLS_ENABLED +kafka.sasl.enabled=KAFKA_SASL_ENABLED +kafka.alfred.mobile.event.ingestion.topic=KAFKA_ALFRED_MOBILE_EVENT_INGESTION_TOPIC +kafka.alfred.mobile.session.upload.topic=KAFKA_ALFRED_MOBILE_SESSION_UPLOAD_TOPIC +kafka.alfred.mobile.metric.ingestion.topic=KAFKA_ALFRED_MOBILE_METRIC_INGESTION_TOPIC +kafka.alfred.mobile.metric.ingestion.topic.client.map =KAFKA_ALFRED_MOBILE_METRIC_INGESTION_TOPIC_CLIENT_MAP + +# es +elasticsearch.nodes=ELASTICSEARCH_NODES +elasticsearch.username=ELASTICSEARCH_USERNAME +elasticsearch.password=ELASTICSEARCH_PASSWORD +elasticsearch.cruise.control.index=ELASTICSEARCH_CRUISE_CONTROL_INDEX +elasticsearch.tls.insecureSkipVerify=ELASTICSEARCH_TLS_INSECURESKIPVERIFY +elasticsearch.max.idle.connection=ELASTICSEARCH_MAX_IDLE_CONNECTION +elasticsearch.max.connection=ELASTICSEARCH_MAX_CONNECTION +elasticsearch.idle.connection.timout=ELASTICSEARCH_IDLE_CONNECTION_TIMEOUT + +# opentelmetry +opentelemetry.collector.url=OPENTELEMETRY_COLLECTOR_URL +opentelemetry.apm.enabled=OPENTELEMETRY_APM_ENABLED + +#litmus +litmus.alfred.experiment.name.map=LITMUS_ALFRED_EXPERIMENT_NAME_MAP 
+litmus.proxy.service.url=LITMUS_PROXY_SERVICE_URL + +#s3 +s3.session.upload.bucket=S3_SESSION_UPLOAD_BUCKET + +#http +http.max.idle.connection.pool=HTTP_MAX_IDLE_CONNECTION_POOL +http.max.connection=HTTP_MAX_CONNECTION +http.max.timeout.seconds=HTTP_MAX_TIMEOUT_SECONDS + +# cors config +whitelisted.domains=WHITELISTED_DOMAINS +allowed.custom.headers=ALLOWED_CUSTOM_HEADERS + +api.key.client.map=API_KEY_CLIENT_MAP +whitelisted.web.clients=WHITELISTED_WEB_CLIENTS +whitelisted.cosmos.meta.data.tags=WHITELISTED_COSMOS_META_DATA_TAGS +whitelisted.navi.app.meta.data.tags=WHITELISTED_NAVI_APP_META_DATA_TAGS +elasticsearch.cruise.control.index.client.map=ELASTICSEARCH_CRUISE_CONTROL_INDEX_CLIENT_MAP +kafka.alfred.web.session.upload.topic.client.map = KAFKA_ALFRED_WEB_SESSION_UPLOAD_TOPIC_CLIENT_MAP +kafka.alfred.mobile.session.upload.topic.client.map=KAFKA_ALFRED_MOBILE_SESSION_UPLOAD_TOPIC_CLIENT_MAP +kafka.alfred.event.topic.client.map=KAFKA_ALFRED_EVENT_TOPIC_CLIENT_MAP +s3.session.upload.bucket.client.map=S3_SESSION_UPLOAD_BUCKET_CLIENT_MAP +cruise.cache.ttl.minutes=CRUISE_CACHE_TTL_MINUTES + +# rate limiting +rate.limit.config = RATE_LIMIT_CONFIG +rate.limit.global.config.period = RATE_LIMIT_GLOBAL_CONFIG_PERIOD +rate.limit.global.config.limit = RATE_LIMIT_GLOBAL_CONFIG_LIMIT +rate.limit.clean.up.interval = RATE_LIMIT_CLEAN_UP_INTERVAL + +#FPS +frames.per.second.threshold.score.map=FRAMES_PER_SECOND_THRESHOLD_SCORE_MAP +frames.per.second.weight.map.navi=FRAMES_PER_SECOND_WEIGHT_MAP_NAVI +frames.per.second.disable.feature.map=FRAMES_PER_SECOND_DISABLE_FEATURE_MAP +frames.per.second.battery.map.navi=FRAMES_PER_SECOND_BATTERY_MAP_NAVI +frames.per.second.memory.map.navi=FRAMES_PER_SECOND_MEMORY_MAP_NAVI +frames.per.second.battery.cosmos.map=FRAMES_PER_SECOND_BATTERY_COSMOS_MAP +frames.per.second.memory.cosmos.map=FRAMES_PER_SECOND_MEMORY_COSMOS_MAP +frames.per.second.weight.cosmos.map=FRAMES_PER_SECOND_WEIGHT_COSMOS_MAP + +#timestamp validation 
+request.body.future.timestamp.validation.difference.in.hours=REQUEST_BODY_FUTURE_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS +request.body.past.timestamp.validation.difference.in.hours=REQUEST_BODY_PAST_TIMESTAMP_VALIDATION_DIFFERENCE_IN_HOURS + + +logs.device.ids=LOGS_DEVICE_IDS +web.session.data.slice.size=WEB_SESSION_DATA_SLICE_SIZE +is.web.session.data.slice.enabled.client.map=IS_WEB_SESSION_DATA_SLICE_ENABLED_CLIENT_MAP +default.factory.web.client.list=DEFAULT_FACTORY_WEB_CLIENT_LIST +default.factory.web.client.list.with.cookie.data=DEFAULT_FACTORY_WEB_CLIENT_LIST_WITH_COOKIE_DATA \ No newline at end of file diff --git a/alfred/config/config.go b/alfred/config/config.go new file mode 100644 index 0000000..7772cc9 --- /dev/null +++ b/alfred/config/config.go @@ -0,0 +1,325 @@ +package config + +import ( + "alfred/pkg/log" + "encoding/json" + "os" + "strings" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type baseConfig struct { + Env string + Port int + MetricPort int + GinMode string +} + +type IngesterConfig struct { + BaseConfig baseConfig + KafkaConfig IngesterKafkaConfig + ElasticSearchConfig IngesterElasticSearchConfig + S3Config IngesterS3Config + HttpConfig HttpConfig + CorsConfig CorsConfig + OutboundServiceConfig IngestorOutboundServiceConfig + ApiKeyConfig map[string]string + DefaultFactoryWebClientList []string + DefaultFactoryWebClientListWithCookieData []string + WhiteListedTags WhiteListedTagsConfig + WebWhitelistedClientsConfig []string + CruiseCacheTtl time.Duration + RateLimiterConfig IngesterRateLimitConfig + FpsThresholdScoreMap map[string]interface{} + FpsWeightMapNavi map[string]interface{} + FpsDisableFeatureMap map[string]interface{} + FpsBatteryMapNavi map[string]interface{} + FpsMemoryMapNavi map[string]interface{} + FpsWeightMapCosmos map[string]interface{} + FpsBatteryMapCosmos map[string]interface{} + FpsMemoryMapCosmos map[string]interface{} + CruiseLogsDeviceIds map[string]interface{} + 
PastTimestampValidationDiffInHours time.Duration + FutureTimestampValidationDiffInHours time.Duration + WebSessionDataSliceSize int + IsWebSessionDataSliceEnabledClientMap map[string]string + TracerConfig TracerConfig +} + +type VideoProcessingConfig struct { + MaxConcurrentJobs int `json:"max_concurrent_jobs"` + DefaultTimeoutSeconds int `json:"default_timeout_seconds"` +} + +type CoreConfig struct { + BaseConfig baseConfig + AuthEnabled bool + ByPassAuthSources []string + ScreenTagFilters map[string][]string + VideoProcessingConfig VideoProcessingConfig + ElasticSearchConfig CoreElasticSearchConfig + OutboundServiceConfig CoreOutboundServiceConfig + FfmpegConfig FfmpegConfig + HttpConfig HttpConfig + CorsConfig CorsConfig + S3Config CoreS3Config + CruiseDropdowns string + ZipUploadTimeConfig float64 + ApiKeyClientMap map[string]string + DefaultSessionTime time.Duration + MaskingConfig MaskingConfig + DeviceMonitoringConfig CoreDeviceMonitoringConfig + ClientsWithoutDurationResponse []string + MinAppVersionForScreenNameFilter string + ZipsWithEventsAppVersions string + MaxFetchWebVideoGofuncConcurrency int64 + MaxBlurringVideoGofuncConcurrency int64 + SessionErrorEventsFilter []string + TouchPointsConfig TouchPointsConfig + ShedlockConfig CoreShedlockConfig + MaxUpdateSessionErrorEventsGofuncConcurrency int64 + ClientAppOsMap map[string]string + ClientProjectNameMap map[string]string + AuthKeyServiceMap map[string]string +} + +type CollectorConfig struct { + BaseConfig baseConfig + KafkaConfig CollectorKafkaConfig + ElasticSearchConfig CollectorElasticSearchConfig + S3Config CollectorS3Config + CorsConfig CorsConfig + CacheTimeForFragmentsIngestion time.Duration + MaxRetry int + InitialDelayInSeconds time.Duration + ErrorEventsSlackTemplateId string + SessionErrorEventsFilter []string + IngestErrorEventsFilter []string + PastTimestampValidationDiffInHours time.Duration + FutureTimestampValidationDiffInHours time.Duration + IgnoredEventTypes []string + 
EventListenerGoroutineGroupLimit int + WebSessionUploadListenerGoroutineGroupLimit int + SessionUploadListenerGoroutineGroupLimit int +} + +var ingesterConfig IngesterConfig +var coreConfig CoreConfig +var collectorConfig CollectorConfig + +func GetIngesterConfig() IngesterConfig { + return ingesterConfig +} + +func GetCoreConfig() CoreConfig { + return coreConfig +} + +func GetCollectorConfig() CollectorConfig { + return collectorConfig +} + +func InitIngesterConfig() { + ingesterConfig = IngesterConfig{ + BaseConfig: baseConfig{ + Env: viper.GetString("env"), + Port: viper.GetInt("port"), + MetricPort: viper.GetInt("metrics.port"), + GinMode: viper.GetString("gin.mode"), + }, + KafkaConfig: *NewIngesterKafkaConfig(), + ElasticSearchConfig: *NewIngesterElasticSearchConfig(), + S3Config: *NewIngesterS3Config(), + HttpConfig: *NewIngesterHttpConfig(), + CorsConfig: *NewCorsConfig(), + TracerConfig: NewTracerConfig(), + ApiKeyConfig: viper.GetStringMapString("api.key.client.map"), + DefaultFactoryWebClientList: strings.Split(viper.GetString("default.factory.web.client.list"), ","), + DefaultFactoryWebClientListWithCookieData: strings.Split(viper.GetString("default.factory.web.client.list.with.cookie.data"), ","), + WebWhitelistedClientsConfig: strings.Split(viper.GetString("whitelisted.web.clients"), ","), + WhiteListedTags: WhiteListedTagsConfig{ + CosmosTags: strings.Split(viper.GetString("whitelisted.cosmos.meta.data.tags"), ","), + NaviAppTags: strings.Split(viper.GetString("whitelisted.navi.app.meta.data.tags"), ","), + }, + CruiseCacheTtl: viper.GetDuration("cruise.cache.ttl.minutes"), + RateLimiterConfig: *NewIngesterRateLimitConfig(), + FpsThresholdScoreMap: viper.GetStringMap("frames.per.second.threshold.score.map"), + FpsWeightMapNavi: viper.GetStringMap("frames.per.second.weight.map.navi"), + FpsDisableFeatureMap: viper.GetStringMap("frames.per.second.disable.feature.map"), + FpsBatteryMapNavi: viper.GetStringMap("frames.per.second.battery.map.navi"), + 
FpsMemoryMapNavi: viper.GetStringMap("frames.per.second.memory.map.navi"), + FpsBatteryMapCosmos: viper.GetStringMap("frames.per.second.battery.cosmos.map"), + FpsMemoryMapCosmos: viper.GetStringMap("frames.per.second.memory.cosmos.map"), + FpsWeightMapCosmos: viper.GetStringMap("frames.per.second.weight.cosmos.map"), + FutureTimestampValidationDiffInHours: viper.GetDuration("request.body.future.timestamp.validation.difference.in.hours"), + PastTimestampValidationDiffInHours: viper.GetDuration("request.body.past.timestamp.validation.difference.in.hours"), + OutboundServiceConfig: *NewIngestorOutboundServiceConfig(), + CruiseLogsDeviceIds: viper.GetStringMap("logs.device.ids"), + WebSessionDataSliceSize: viper.GetInt("web.session.data.slice.size"), + IsWebSessionDataSliceEnabledClientMap: viper.GetStringMapString("is.web.session.data.slice.enabled.client.map"), + } +} + +func initCoreConfig() { + config := viper.GetString("filter.screen.tag.config") + filterScreenTagConfig := make(map[string][]string) + err := json.Unmarshal([]byte(config), &filterScreenTagConfig) + if err != nil { + log.Error("failed to unmarshal screen tag config", zap.Error(err)) + os.Exit(1) + } + + coreConfig = CoreConfig{ + VideoProcessingConfig: VideoProcessingConfig{ + MaxConcurrentJobs: viper.GetInt("video.processing.max.concurrent.jobs"), + DefaultTimeoutSeconds: viper.GetInt("video.processing.default.timeout.seconds"), + }, + BaseConfig: baseConfig{ + Env: viper.GetString("env"), + Port: viper.GetInt("port"), + MetricPort: viper.GetInt("metrics.port"), + GinMode: viper.GetString("gin.mode"), + }, + AuthEnabled: viper.GetBool("auth.enabled"), + ByPassAuthSources: strings.Split(viper.GetString("source.to.bypass"), ","), + ScreenTagFilters: filterScreenTagConfig, + ElasticSearchConfig: *NewCoreElasticSearchConfig(), + OutboundServiceConfig: *NewCoreOutboundServiceConfig(), + FfmpegConfig: *NewFfmpegConfig(), + HttpConfig: *NewCoreHttpConfig(), + CorsConfig: *NewCorsConfig(), + S3Config: 
*NewCoreS3Config(), + CruiseDropdowns: viper.GetString("cruise.dropdowns"), + ZipUploadTimeConfig: viper.GetFloat64("zip.upload.frequency.milliseconds"), + ApiKeyClientMap: viper.GetStringMapString("api.key.client.map"), + DefaultSessionTime: viper.GetDuration("default.app.sessions.time.hours"), + DeviceMonitoringConfig: *NewCoreDeviceMonitoringConfig(), + ClientsWithoutDurationResponse: strings.Split(viper.GetString("clients.without.duration.response"), ","), + ZipsWithEventsAppVersions: viper.GetString("video.generation.events.app.versions"), + MinAppVersionForScreenNameFilter: viper.GetString("min.app.version.for.screenname.filter"), + MaxFetchWebVideoGofuncConcurrency: viper.GetInt64("max.fetch.web.video.gofunc.concurrency"), + MaxBlurringVideoGofuncConcurrency: viper.GetInt64("max.blurring.video.gofunc.concurrency"), + SessionErrorEventsFilter: strings.Split(viper.GetString("session.error.events.filter"), ","), + TouchPointsConfig: *NewTouchPointsConfig(), + ShedlockConfig: *NewCoreShedlockConfig(), + MaskingConfig: *NewMaskingConfig(), + MaxUpdateSessionErrorEventsGofuncConcurrency: viper.GetInt64("max.update.session.error.events.gofunc.concurrency"), + ClientAppOsMap: viper.GetStringMapString("client.app.os.map"), + ClientProjectNameMap: viper.GetStringMapString("client.project.name.map"), + AuthKeyServiceMap: viper.GetStringMapString("auth.key.service.map"), + } +} + +func initCollectorConfig() { + collectorConfig = CollectorConfig{ + BaseConfig: baseConfig{ + Env: viper.GetString("env"), + Port: viper.GetInt("port"), + MetricPort: viper.GetInt("metrics.port"), + GinMode: viper.GetString("gin.mode"), + }, + KafkaConfig: *NewCollectorKafkaConfig(), + ElasticSearchConfig: *NewCollectorElasticSearchConfig(), + S3Config: *NewCollectorS3Config(), + CacheTimeForFragmentsIngestion: viper.GetDuration("cache.time.fragments.ingestion"), + MaxRetry: viper.GetInt("ingest.elasticsearch.max.retry"), + InitialDelayInSeconds: 
viper.GetDuration("ingest.elasticsearch.initial.delay.in.seconds"), + ErrorEventsSlackTemplateId: viper.GetString("error.events.slack.template.id"), + SessionErrorEventsFilter: strings.Split(viper.GetString("session.error.events.filter"), ","), + IngestErrorEventsFilter: strings.Split(viper.GetString("ingest.error.events.filter"), ","), + FutureTimestampValidationDiffInHours: viper.GetDuration("request.body.future.timestamp.validation.difference.in.hours"), + PastTimestampValidationDiffInHours: viper.GetDuration("request.body.past.timestamp.validation.difference.in.hours"), + IgnoredEventTypes: strings.Split(viper.GetString("ignored.event.types"), ","), + EventListenerGoroutineGroupLimit: viper.GetInt("event.listener.goroutine.group.limit"), + WebSessionUploadListenerGoroutineGroupLimit: viper.GetInt("web.session.upload.listener.goroutine.group.limit"), + SessionUploadListenerGoroutineGroupLimit: viper.GetInt("session.upload.listener.goroutine.group.limit"), + } +} + +func LoadIngesterConfig() { + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.SetConfigName("ingester-config") + viper.SetConfigType("properties") + viper.SetConfigFile("./config/application-ingester.properties") + + err := viper.ReadInConfig() + if err != nil { + log.Error("Error while loading ingester configuration", zap.Error(err)) + os.Exit(1) + } + InitIngesterConfig() +} + +func LoadCollectorConfig() { + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.SetConfigName("collector-config") + viper.SetConfigType("properties") + viper.SetConfigFile("./config/application-collector.properties") + + err := viper.ReadInConfig() + if err != nil { + log.Error("Error while loading collector configuration", zap.Error(err)) + os.Exit(1) + } + initCollectorConfig() +} + +func LoadCoreConfig() { + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.SetConfigName("core-config") + 
viper.SetConfigType("properties") + viper.SetConfigFile("./config/application-core.properties") + + err := viper.ReadInConfig() + if err != nil { + log.Error("Error while loading core configuration", zap.Error(err)) + os.Exit(1) + } + initCoreConfig() +} + +type FerretConfig struct { + BaseConfig baseConfig + KafkaConfig FerretKafkaConfig + ApiKeyConfig map[string]string +} + +var ferretConfig FerretConfig + +func GetFerretConfig() FerretConfig { + return ferretConfig +} + +func initFerretConfig() { + ferretConfig = FerretConfig{ + BaseConfig: baseConfig{ + Env: viper.GetString("env"), + Port: viper.GetInt("port"), + MetricPort: viper.GetInt("metrics.port"), + GinMode: viper.GetString("gin.mode"), + }, + KafkaConfig: *NewFerretKafkaConfig(), + ApiKeyConfig: viper.GetStringMapString("api.key.client.map"), + } +} + +func LoadFerretConfig() { + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + viper.SetConfigName("ferret-config") + viper.SetConfigType("properties") + viper.SetConfigFile("./config/application-ferret.properties") + + err := viper.ReadInConfig() + if err != nil { + log.Error("Error while loading ferret configuration", zap.Error(err)) + os.Exit(1) + } + initFerretConfig() +} diff --git a/alfred/config/cors_config.go b/alfred/config/cors_config.go new file mode 100644 index 0000000..eac534c --- /dev/null +++ b/alfred/config/cors_config.go @@ -0,0 +1,18 @@ +package config + +import ( + "github.com/spf13/viper" + "strings" +) + +type CorsConfig struct { + WhitelistedDomains []string + AllowedCustomHeaders string +} + +func NewCorsConfig() *CorsConfig { + return &CorsConfig{ + WhitelistedDomains: strings.Split(viper.GetString("whitelisted.domains"), ","), + AllowedCustomHeaders: viper.GetString("allowed.custom.headers"), + } +} diff --git a/alfred/config/device_monitoring_config.go b/alfred/config/device_monitoring_config.go new file mode 100644 index 0000000..dd5dcc8 --- /dev/null +++ b/alfred/config/device_monitoring_config.go 
@@ -0,0 +1,27 @@ +package config + +import ( + "github.com/spf13/viper" +) + +type CoreDeviceMonitoringConfig struct { + DeviceMonitoringCronScheduleMap map[string]string + DeviceMonitoringChannelIdMap map[string]string + BatteryAlertMap map[string]interface{} + MemoryAlertMap map[string]interface{} + SlackTemplateId string + DeviceMetricsUpdateCronScheduleMap map[string]string + DeviceMetricsTotalLimitMap map[string]interface{} +} + +func NewCoreDeviceMonitoringConfig() *CoreDeviceMonitoringConfig { + return &CoreDeviceMonitoringConfig{ + DeviceMonitoringCronScheduleMap: viper.GetStringMapString("device.metrics.monitoring.cron.schedule.map"), + DeviceMonitoringChannelIdMap: viper.GetStringMapString("device.metrics.monitoring.channel.id.map"), + BatteryAlertMap: viper.GetStringMap("device.metrics.monitoring.battery.alert.map"), + MemoryAlertMap: viper.GetStringMap("device.metrics.monitoring.memory.alert.map"), + SlackTemplateId: viper.GetString("device.metrics.monitoring.slack.service.template.id"), + DeviceMetricsUpdateCronScheduleMap: viper.GetStringMapString("device.metrics.update.cron.schedule.map"), + DeviceMetricsTotalLimitMap: viper.GetStringMap("device.metrics.total.limit.map"), + } +} diff --git a/alfred/config/elasticapm-collector.properties b/alfred/config/elasticapm-collector.properties new file mode 100644 index 0000000..e3df347 --- /dev/null +++ b/alfred/config/elasticapm-collector.properties @@ -0,0 +1,2 @@ +application_packages=alfred.cmd.collector +service_name=alfred-collector \ No newline at end of file diff --git a/alfred/config/elasticapm-core.properties b/alfred/config/elasticapm-core.properties new file mode 100644 index 0000000..cc8bbcb --- /dev/null +++ b/alfred/config/elasticapm-core.properties @@ -0,0 +1,2 @@ +application_packages=alfred.cmd.core +service_name=alfred-core \ No newline at end of file diff --git a/alfred/config/elasticapm-ferret.properties b/alfred/config/elasticapm-ferret.properties new file mode 100644 index 
0000000..8d8b04a --- /dev/null +++ b/alfred/config/elasticapm-ferret.properties @@ -0,0 +1,2 @@ +application_packages=alfred.cmd.ferret +service_name=alfred-ferret \ No newline at end of file diff --git a/alfred/config/elasticapm-ingester.properties b/alfred/config/elasticapm-ingester.properties new file mode 100644 index 0000000..761be9d --- /dev/null +++ b/alfred/config/elasticapm-ingester.properties @@ -0,0 +1,2 @@ +application_packages=alfred.cmd.ingester +service_name=alfred-ingester \ No newline at end of file diff --git a/alfred/config/elasticsearch_config.go b/alfred/config/elasticsearch_config.go new file mode 100644 index 0000000..333ceb7 --- /dev/null +++ b/alfred/config/elasticsearch_config.go @@ -0,0 +1,115 @@ +package config + +import ( + "github.com/spf13/viper" + "strings" + "time" +) + +type ElasticSearchBaseConfig struct { + Nodes []string + Username string + Password string + MaxIdleConnection int + MaxConnection int + IdleConnectionTimeoutInSeconds int +} + +type IngesterElasticSearchConfig struct { + BaseConfig ElasticSearchBaseConfig + CruiseControlIndex string + CruiseControlIndexClientMap map[string]string +} + +type CoreElasticSearchConfig struct { + BaseConfig ElasticSearchBaseConfig + CruiseControlIndexClientMap map[string]string + EventIngestionIndexClientMap map[string]string + SessionUploadIndexClientMap map[string]string + DeviceMetricsIndexClientMap map[string]string + WebSessionUploadIndexClientMap map[string]string + IndexCreationCronSchedule string + ErrorEventsUploadIndexClientMap map[string]string + ErrorEventsUpdateCronSchedule string + ErrorEventsLastCronTimestampIndex string + ShedLockForCronIndex string + ErrorEventsUpdateCronDelayTimeInMinutes time.Duration + VideoGenerationStatusIndexClientMap map[string]string + ElasticSearchUpdateMaxRetry int + ElasticSearchUpdateRetryBackOffInSeconds time.Duration + ErrorEventsUpdateBatchSize int +} + +type CollectorElasticSearchConfig struct { + BaseConfig ElasticSearchBaseConfig + 
AppEventIngestionIndexClientMap map[string]string + AppSessionUploadIndexClientMap map[string]string + AppMetricUploadIndex string + WebSessionUploadIndexClientMap map[string]string + FragmentIngestionIndex string + ErrorEventsUploadIndexClientMap map[string]string + MinWebVersionSupportingSingleDoc int64 +} + +func NewIngesterElasticSearchConfig() *IngesterElasticSearchConfig { + return &IngesterElasticSearchConfig{ + BaseConfig: ElasticSearchBaseConfig{ + Nodes: strings.Split(viper.GetString("elasticsearch.nodes"), ","), + Username: viper.GetString("elasticsearch.username"), + Password: viper.GetString("elasticsearch.password"), + MaxIdleConnection: viper.GetInt("elasticsearch.max.idle.connection"), + MaxConnection: viper.GetInt("elasticsearch.max.connection"), + IdleConnectionTimeoutInSeconds: viper.GetInt("elasticsearch.idle.connection.timout"), + }, + CruiseControlIndex: viper.GetString("elasticsearch.cruise.control.index"), + CruiseControlIndexClientMap: viper.GetStringMapString("elasticsearch.cruise.control.index.client.map"), + } +} + +func NewCoreElasticSearchConfig() *CoreElasticSearchConfig { + return &CoreElasticSearchConfig{ + BaseConfig: ElasticSearchBaseConfig{ + Nodes: strings.Split(viper.GetString("elasticsearch.nodes"), ","), + Username: viper.GetString("elasticsearch.username"), + Password: viper.GetString("elasticsearch.password"), + MaxIdleConnection: viper.GetInt("elasticsearch.max.idle.connection"), + MaxConnection: viper.GetInt("elasticsearch.max.connection"), + IdleConnectionTimeoutInSeconds: viper.GetInt("elasticsearch.idle.connection.timout"), + }, + CruiseControlIndexClientMap: viper.GetStringMapString("elasticsearch.cruise.control.index"), + EventIngestionIndexClientMap: viper.GetStringMapString("elasticsearch.event.ingestion.index"), + SessionUploadIndexClientMap: viper.GetStringMapString("elasticsearch.session.upload.index"), + DeviceMetricsIndexClientMap: viper.GetStringMapString("elasticsearch.device.metrics.index"), + 
WebSessionUploadIndexClientMap: viper.GetStringMapString("elasticsearch.web.session.upload.index.client.map"), + IndexCreationCronSchedule: viper.GetString("elasticsearch.index.creation.cron.schedule"), + ErrorEventsUploadIndexClientMap: viper.GetStringMapString("elasticsearch.error.events.upload.index.client.map"), + ErrorEventsUpdateCronSchedule: viper.GetString("elasticsearch.error.events.update.cron.schedule"), + ErrorEventsLastCronTimestampIndex: viper.GetString("elasticsearch.error.events.last.cron.timestamp.index"), + ShedLockForCronIndex: viper.GetString("elasticsearch.shed.lock.cron.index"), + VideoGenerationStatusIndexClientMap: viper.GetStringMapString("video.generation.status.index.client.map"), + ElasticSearchUpdateMaxRetry: viper.GetInt("elasticsearch.update.max.retry"), + ErrorEventsUpdateCronDelayTimeInMinutes: viper.GetDuration("elasticsearch.error.events.update.cron.delay.time.in.minutes"), + ElasticSearchUpdateRetryBackOffInSeconds: viper.GetDuration("elasticsearch.update.retry.backoff.seconds"), + ErrorEventsUpdateBatchSize: viper.GetInt("elasticsearch.error.events.update.batch.size"), + } +} + +func NewCollectorElasticSearchConfig() *CollectorElasticSearchConfig { + return &CollectorElasticSearchConfig{ + BaseConfig: ElasticSearchBaseConfig{ + Nodes: strings.Split(viper.GetString("elasticsearch.nodes"), ","), + Username: viper.GetString("elasticsearch.username"), + Password: viper.GetString("elasticsearch.password"), + MaxIdleConnection: viper.GetInt("elasticsearch.max.idle.connection"), + MaxConnection: viper.GetInt("elasticsearch.max.connection"), + IdleConnectionTimeoutInSeconds: viper.GetInt("elasticsearch.idle.connection.timout"), + }, + AppEventIngestionIndexClientMap: viper.GetStringMapString("elasticsearch.event.ingestion.index.client.map"), + AppSessionUploadIndexClientMap: viper.GetStringMapString("elasticsearch.session.upload.index.client.map"), + AppMetricUploadIndex: viper.GetString("elasticsearch.metric.ingestion.index"), + 
WebSessionUploadIndexClientMap: viper.GetStringMapString("elasticsearch.web.session.upload.index.client.map"), + FragmentIngestionIndex: viper.GetString("elasticsearch.fragment.ingestion.index"), + ErrorEventsUploadIndexClientMap: viper.GetStringMapString("elasticsearch.error.events.upload.index.client.map"), + MinWebVersionSupportingSingleDoc: viper.GetInt64("elasticsearch.min.web.version.supporting.single.doc"), + } +} diff --git a/alfred/config/ffmpeg_config.go b/alfred/config/ffmpeg_config.go new file mode 100644 index 0000000..6f930f4 --- /dev/null +++ b/alfred/config/ffmpeg_config.go @@ -0,0 +1,13 @@ +package config + +import "github.com/spf13/viper" + +type FfmpegConfig struct { + Framerate int +} + +func NewFfmpegConfig() *FfmpegConfig { + return &FfmpegConfig{ + Framerate: viper.GetInt("ffmpeg.video.generation.framerate"), + } +} diff --git a/alfred/config/http_config.go b/alfred/config/http_config.go new file mode 100644 index 0000000..51a653f --- /dev/null +++ b/alfred/config/http_config.go @@ -0,0 +1,25 @@ +package config + +import "github.com/spf13/viper" + +type HttpConfig struct { + MaxIdleConnectionPool int + MaxConnection int + MaxTimeoutInSeconds int +} + +func NewIngesterHttpConfig() *HttpConfig { + return &HttpConfig{ + MaxIdleConnectionPool: viper.GetInt("http.max.idle.connection.pool"), + MaxConnection: viper.GetInt("http.max.connection"), + MaxTimeoutInSeconds: viper.GetInt("http.max.timeout.seconds"), + } +} + +func NewCoreHttpConfig() *HttpConfig { + return &HttpConfig{ + MaxIdleConnectionPool: viper.GetInt("http.max.idle.connection.pool"), + MaxConnection: viper.GetInt("http.max.connection"), + MaxTimeoutInSeconds: viper.GetInt("http.max.timeout.seconds"), + } +} diff --git a/alfred/config/kafka_config.go b/alfred/config/kafka_config.go new file mode 100644 index 0000000..94f7552 --- /dev/null +++ b/alfred/config/kafka_config.go @@ -0,0 +1,108 @@ +package config + +import ( + "github.com/spf13/viper" + "strings" +) + +type KafkaBaseConfig 
struct { + Brokers []string + Username string + Password string + TlsInsureSkipVerification bool + TlsEnabled bool + SaslEnabled bool +} + +type IngesterKafkaConfig struct { + BaseConfig KafkaBaseConfig + EventIngestionTopic string + SessionUploadTopic string + MetricIngestionTopic string + SessionUploadTopicClientMap map[string]string + EventIngestionTopicClientMap map[string]string + WebSessionUploadTopicClientMap map[string]string + MetricsIngestionTopicClientMap map[string]string +} + +type CollectorKafkaConfig struct { + BaseConfig KafkaBaseConfig + EventIngestionTopic string + SessionUploadTopic string + MetricIngestionTopic string + WebSessionUploadTopic string + EventIngestionTopicGroupId string + SessionUploadTopicGroupId string + MetricIngestionTopicGroupId string + WebSessionUploadTopicGroupId string + ErrorEventsUploadTopic string + ErrorEventsSlackPushTopic string + ErrorEventsUploadTopicGroupId string + ErrorEventsUpdateTopic string + ErrorEventsUpdateTopicGroupId string +} + +func NewIngesterKafkaConfig() *IngesterKafkaConfig { + return &IngesterKafkaConfig{ + BaseConfig: KafkaBaseConfig{ + Brokers: strings.Split(viper.GetString("kafka.brokers"), ","), + Username: viper.GetString("kafka.username"), + Password: viper.GetString("kafka.password"), + TlsInsureSkipVerification: viper.GetBool("kafka.tls.insecureSkipVerify"), + SaslEnabled: viper.GetBool("kafka.sasl.enabled"), + TlsEnabled: viper.GetBool("kafka.tls.enabled"), + }, + EventIngestionTopic: viper.GetString("kafka.alfred.mobile.event.ingestion.topic"), + SessionUploadTopic: viper.GetString("kafka.alfred.mobile.session.upload.topic"), + MetricIngestionTopic: viper.GetString("kafka.alfred.mobile.metric.ingestion.topic"), + SessionUploadTopicClientMap: viper.GetStringMapString("kafka.alfred.mobile.session.upload.topic.client.map"), + WebSessionUploadTopicClientMap: viper.GetStringMapString("kafka.alfred.web.session.upload.topic.client.map"), + EventIngestionTopicClientMap: 
viper.GetStringMapString("kafka.alfred.event.topic.client.map"), + MetricsIngestionTopicClientMap: viper.GetStringMapString("kafka.alfred.mobile.metric.ingestion.topic.client.map"), + } +} + +func NewCollectorKafkaConfig() *CollectorKafkaConfig { + return &CollectorKafkaConfig{ + BaseConfig: KafkaBaseConfig{ + Brokers: strings.Split(viper.GetString("kafka.brokers"), ","), + Username: viper.GetString("kafka.username"), + Password: viper.GetString("kafka.password"), + TlsInsureSkipVerification: viper.GetBool("kafka.tls.insecureSkipVerify"), + SaslEnabled: viper.GetBool("kafka.sasl.enabled"), + TlsEnabled: viper.GetBool("kafka.tls.enabled"), + }, + EventIngestionTopic: viper.GetString("kafka.alfred.mobile.event.ingestion.topic"), + SessionUploadTopic: viper.GetString("kafka.alfred.mobile.session.upload.topic"), + MetricIngestionTopic: viper.GetString("kafka.alfred.mobile.metric.ingestion.topic"), + WebSessionUploadTopic: viper.GetString("kafka.alfred.web.session.upload.topic"), + EventIngestionTopicGroupId: viper.GetString("event.ingestion.listener.group.id"), + SessionUploadTopicGroupId: viper.GetString("session.upload.listener.group.id"), + MetricIngestionTopicGroupId: viper.GetString("metric.upload.listener.group.id"), + WebSessionUploadTopicGroupId: viper.GetString("web.session.upload.listener.group.id"), + ErrorEventsUploadTopic: viper.GetString("kafka.alfred.error.events.upload.topic"), + ErrorEventsSlackPushTopic: viper.GetString("kafka.alfred.error.events.slack.push.topic"), + ErrorEventsUpdateTopic: viper.GetString("kafka.alfred.error.events.update.topic"), + ErrorEventsUploadTopicGroupId: viper.GetString("error.events.upload.listener.group.id"), + ErrorEventsUpdateTopicGroupId: viper.GetString("error.events.update.listener.group.id"), + } +} + +type FerretKafkaConfig struct { + BaseConfig KafkaBaseConfig + ErrorEventsUploadTopic string +} + +func NewFerretKafkaConfig() *FerretKafkaConfig { + return &FerretKafkaConfig{ + BaseConfig: KafkaBaseConfig{ + 
Brokers: strings.Split(viper.GetString("kafka.brokers"), ","), + Username: viper.GetString("kafka.username"), + Password: viper.GetString("kafka.password"), + TlsInsureSkipVerification: viper.GetBool("kafka.tls.insecureSkipVerify"), + SaslEnabled: viper.GetBool("kafka.sasl.enabled"), + TlsEnabled: viper.GetBool("kafka.tls.enabled"), + }, + ErrorEventsUploadTopic: viper.GetString("kafka.alfred.error.events.upload.topic"), + } +} diff --git a/alfred/config/masking_config.go b/alfred/config/masking_config.go new file mode 100644 index 0000000..7ee724f --- /dev/null +++ b/alfred/config/masking_config.go @@ -0,0 +1,32 @@ +package config + +import ( + "github.com/spf13/viper" + "strings" +) + +type MaskingConfig struct { + MaskedScreensStrategyMap map[string]string + MaskedScreensBlurScreenRatioMap map[string]string + BufferTimeForMaskInMillis int64 + MaskingEnabled bool + DefaultBlurScreenRatio string + DefaultBlurScreenStrength string + MinAppVersionCodeClientMap map[string]string + MaskingFallBackScreens []string + DSMaskingScreensWithFaceDetectionEnabled []string +} + +func NewMaskingConfig() *MaskingConfig { + return &MaskingConfig{ + MaskedScreensStrategyMap: viper.GetStringMapString("masked.screens.strategy.map"), + MaskedScreensBlurScreenRatioMap: viper.GetStringMapString("masked.screens.blur.ratio.map"), + BufferTimeForMaskInMillis: viper.GetInt64("buffer.time.millis.mask"), + MaskingEnabled: viper.GetBool("masking.enabled"), + DefaultBlurScreenRatio: viper.GetString("default.blur.screen.ratio"), + DefaultBlurScreenStrength: viper.GetString("default.blur.screen.strength"), + MinAppVersionCodeClientMap: viper.GetStringMapString("masking.min.app.version.code.client.map"), + MaskingFallBackScreens: strings.Split(viper.GetString("masking.fallback.screens"), ","), + DSMaskingScreensWithFaceDetectionEnabled: strings.Split(viper.GetString("ds.masking.screens.with.face.detection.enabled"), ","), + } +} diff --git a/alfred/config/outbound_service_config.go 
b/alfred/config/outbound_service_config.go new file mode 100644 index 0000000..0ed1fdb --- /dev/null +++ b/alfred/config/outbound_service_config.go @@ -0,0 +1,40 @@ +package config + +import "github.com/spf13/viper" + +type CoreOutboundServiceConfig struct { + CustomerProfileServiceUrl string + CustomerFederationLayerUrl string + CustomerFederationLayerTenantId string + MjolnirServiceUrl string + MjolnirRealmId string + AlfredIngestorUrl string + AlfredIngestorClientApiKeyMap map[string]string + SlackServiceUrl string + DataScienceServiceMaskingBaseUrl string +} +type IngestorOutboundServiceConfig struct { + LitmusServiceUrl string + LitmusAlfredExperimentMap map[string]string +} + +func NewCoreOutboundServiceConfig() *CoreOutboundServiceConfig { + return &CoreOutboundServiceConfig{ + CustomerProfileServiceUrl: viper.GetString("customer.profile.service.url"), + CustomerFederationLayerUrl: viper.GetString("customer.federation.layer.url"), + CustomerFederationLayerTenantId: viper.GetString("customer.federation.tenant.id"), + MjolnirServiceUrl: viper.GetString("mjolnir.service.url"), + MjolnirRealmId: viper.GetString("mjolnir.realm.id"), + AlfredIngestorUrl: viper.GetString("alfred.ingestor.base.url"), + AlfredIngestorClientApiKeyMap: viper.GetStringMapString("ingestor.client.api.key.map"), + SlackServiceUrl: viper.GetString("slack.service.url"), + DataScienceServiceMaskingBaseUrl: viper.GetString("data.science.service.masking.base.url"), + } +} + +func NewIngestorOutboundServiceConfig() *IngestorOutboundServiceConfig { + return &IngestorOutboundServiceConfig{ + LitmusServiceUrl: viper.GetString("litmus.proxy.service.url"), + LitmusAlfredExperimentMap: viper.GetStringMapString("litmus.alfred.experiment.name.map"), + } +} diff --git a/alfred/config/rate_limiting_config.go b/alfred/config/rate_limiting_config.go new file mode 100644 index 0000000..0b65fa0 --- /dev/null +++ b/alfred/config/rate_limiting_config.go @@ -0,0 +1,22 @@ +package config + +import ( + 
"github.com/spf13/viper" + "time" +) + +type IngesterRateLimitConfig struct { + RateLimiterConfig map[string]string + RateLimiterConfigGlobalPeriod time.Duration + RateLimiterConfigGlobalLimit int64 + RateLimiterCleanUpInterval time.Duration +} + +func NewIngesterRateLimitConfig() *IngesterRateLimitConfig { + return &IngesterRateLimitConfig{ + RateLimiterConfig: viper.GetStringMapString("rate.limit.config"), + RateLimiterConfigGlobalPeriod: viper.GetDuration("rate.limit.global.config.period"), + RateLimiterConfigGlobalLimit: viper.GetInt64("rate.limit.global.config.limit"), + RateLimiterCleanUpInterval: viper.GetDuration("rate.limit.clean.up.interval"), + } +} diff --git a/alfred/config/s3_config.go b/alfred/config/s3_config.go new file mode 100644 index 0000000..c364d85 --- /dev/null +++ b/alfred/config/s3_config.go @@ -0,0 +1,41 @@ +package config + +import "github.com/spf13/viper" + +type IngesterS3Config struct { + SessionUploadBucket string + SessionUploadBucketClientMap map[string]string +} + +type CoreS3Config struct { + SessionUploadBucketClientMap map[string]string + VideoUploadBucketClientMap map[string]string + WebSessionBucketClientMap map[string]string + MinWebVersionSupportingFolderUpload int64 +} + +type CollectorS3Config struct { + WebSessionBucketClientMap map[string]string +} + +func NewIngesterS3Config() *IngesterS3Config { + return &IngesterS3Config{ + SessionUploadBucket: viper.GetString("s3.session.upload.bucket"), + SessionUploadBucketClientMap: viper.GetStringMapString("s3.session.upload.bucket.client.map"), + } +} + +func NewCoreS3Config() *CoreS3Config { + return &CoreS3Config{ + SessionUploadBucketClientMap: viper.GetStringMapString("s3.session.upload.bucket"), + VideoUploadBucketClientMap: viper.GetStringMapString("s3.video.upload.bucket"), + WebSessionBucketClientMap: viper.GetStringMapString("s3.web.session.upload.bucket.client.map"), + MinWebVersionSupportingFolderUpload: viper.GetInt64("s3.min.web.version.supporting.folder.upload"), 
+ } +} + +func NewCollectorS3Config() *CollectorS3Config { + return &CollectorS3Config{ + WebSessionBucketClientMap: viper.GetStringMapString("s3.web.session.upload.bucket.client.map"), + } +} diff --git a/alfred/config/shedlock_config.go b/alfred/config/shedlock_config.go new file mode 100644 index 0000000..800549f --- /dev/null +++ b/alfred/config/shedlock_config.go @@ -0,0 +1,22 @@ +package config + +import ( + "github.com/spf13/viper" + "time" +) + +type CoreShedlockConfig struct { + DeviceMetricsCronLockedUntil time.Duration + SessionIndexCreationCronLockedUntil time.Duration + EventIndexCreationCronLockedUntil time.Duration + ErrorEventUpdateCronLockedUntil time.Duration +} + +func NewCoreShedlockConfig() *CoreShedlockConfig { + return &CoreShedlockConfig{ + DeviceMetricsCronLockedUntil: viper.GetDuration("device.metrics.cron.locked.until"), + SessionIndexCreationCronLockedUntil: viper.GetDuration("session.index.creation.cron.locked.until"), + EventIndexCreationCronLockedUntil: viper.GetDuration("event.index.creation.cron.locked.until"), + ErrorEventUpdateCronLockedUntil: viper.GetDuration("error.event.update.cron.locked.until"), + } +} diff --git a/alfred/config/touch_points_config.go b/alfred/config/touch_points_config.go new file mode 100644 index 0000000..4beb1f7 --- /dev/null +++ b/alfred/config/touch_points_config.go @@ -0,0 +1,27 @@ +package config + +import ( + "github.com/spf13/viper" +) + +type TouchPointsConfig struct { + Enabled bool + MinAppVersionCodeClientMap map[string]string + TouchPointCircleRadius int + TouchPointRedComponent int + TouchPointGreenComponent int + TouchPointBlueComponent int + TouchPointAlphaComponent int +} + +func NewTouchPointsConfig() *TouchPointsConfig { + return &TouchPointsConfig{ + Enabled: viper.GetBool("touchpoints.enabled"), + MinAppVersionCodeClientMap: viper.GetStringMapString("touchpoints.min.app.version.code.client.map"), + TouchPointCircleRadius: viper.GetInt("touchpoints.circle.radius"), + 
TouchPointRedComponent: viper.GetInt("touchpoints.circle.red.component"), + TouchPointGreenComponent: viper.GetInt("touchpoints.circle.green.component"), + TouchPointBlueComponent: viper.GetInt("touchpoints.circle.blue.component"), + TouchPointAlphaComponent: viper.GetInt("touchpoints.alpha.component"), + } +} diff --git a/alfred/config/tracer_config.go b/alfred/config/tracer_config.go new file mode 100644 index 0000000..2f1a76a --- /dev/null +++ b/alfred/config/tracer_config.go @@ -0,0 +1,15 @@ +package config + +import "github.com/spf13/viper" + +type TracerConfig struct { + Url string + APMEnabled bool +} + +func NewTracerConfig() TracerConfig { + return TracerConfig{ + Url: viper.GetString("opentelemetry.collector.url"), + APMEnabled: viper.GetBool("opentelemetry.apm.enabled"), + } +} diff --git a/alfred/config/whitelisted_tags_config.go b/alfred/config/whitelisted_tags_config.go new file mode 100644 index 0000000..52cdef8 --- /dev/null +++ b/alfred/config/whitelisted_tags_config.go @@ -0,0 +1,6 @@ +package config + +type WhiteListedTagsConfig struct { + CosmosTags []string + NaviAppTags []string +} diff --git a/alfred/constant/.gitignore b/alfred/constant/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/deployment/.gitignore b/alfred/deployment/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/docker-compose.yml b/alfred/docker-compose.yml new file mode 100644 index 0000000..5950b10 --- /dev/null +++ b/alfred/docker-compose.yml @@ -0,0 +1,87 @@ +version: "3.2" +services: + redis: + image: "redis:alpine" + container_name: 'alfred_redis' + ports: + - '6379:6379' + logging: + driver: none + networks: + - alfred_net + + core-app: + image: golang + container_name: "alfred_core" + depends_on: + - redis + volumes: + - .:/app + ports: + - '8080:8080' + working_dir: /app + networks: + - alfred_net + env_file: + - ./config/core.env + build: + context: . 
+ dockerfile: Dockerfile.core + + ingester-app: + image: golang + container_name: "alfred_ingester" + depends_on: + - redis + volumes: + - .:/app + ports: + - '8081:8081' + working_dir: /app + networks: + - alfred_net + env_file: + - ./config/ingester.env + build: + context: . + dockerfile: Dockerfile.ingester + + collector-app: + image: golang + container_name: "alfred_collector" + depends_on: + - redis + volumes: + - .:/app + ports: + - '8082:8082' + working_dir: /app + networks: + - alfred_net + env_file: + - ./config/collector.env + build: + context: . + dockerfile: Dockerfile.collector + + ferret-app: + image: golang + container_name: "alfred_ferret" + depends_on: + - redis + volumes: + - .:/app + ports: + - '8083:8083' + working_dir: /app + networks: + - alfred_net + env_file: + - ./config/ferret.env + build: + context: . + dockerfile: Dockerfile.ferret + +networks: + alfred_net: + driver: bridge \ No newline at end of file diff --git a/alfred/go.mod b/alfred/go.mod new file mode 100644 index 0000000..afcb30c --- /dev/null +++ b/alfred/go.mod @@ -0,0 +1,148 @@ +module alfred + +go 1.23.8 + +require ( + github.com/Shopify/sarama v1.38.1 + github.com/aws/aws-sdk-go-v2 v1.32.5 + github.com/aws/aws-sdk-go-v2/config v1.18.8 + github.com/aws/aws-sdk-go-v2/service/s3 v1.68.0 + github.com/elastic/go-elasticsearch/v8 v8.12.0 + github.com/gin-contrib/pprof v1.5.1 + github.com/gin-contrib/zap v1.1.4 + github.com/gin-gonic/gin v1.10.0 + github.com/google/uuid v1.6.0 + github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 + github.com/navi-commons/go-tracer v0.0.6 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/prometheus/client_golang v1.20.5 + github.com/robfig/cron/v3 v3.0.1 + github.com/stretchr/testify v1.9.0 + github.com/u2takey/ffmpeg-go v0.5.0 + github.com/u2takey/go-utils v0.3.1 + github.com/ulikunitz/xz v0.5.12 + github.com/ulule/limiter/v3 v3.11.2 + github.com/xdg-go/scram v1.1.2 + go.elastic.co/apm/module/apmgin/v2 v2.6.2 + 
go.elastic.co/ecszap v1.0.3 + go.uber.org/automaxprocs v1.6.0 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 + golang.org/x/image v0.22.0 + golang.org/x/sync v0.9.0 + google.golang.org/grpc v1.68.0 +) + +require ( + github.com/armon/go-radix v1.0.0 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect + github.com/aws/smithy-go v1.22.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bytedance/sonic v1.12.4 // indirect + github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.4.0 // indirect + github.com/elastic/go-sysinfo v1.15.0 // indirect + github.com/elastic/go-windows v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.7 // indirect + 
github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + go.elastic.co/apm/module/apmhttp/v2 v2.6.2 // indirect + go.elastic.co/apm/v2 v2.6.2 // indirect + go.elastic.co/fastjson v1.4.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/arch v0.12.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.1 // indirect +) + +require ( + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.23.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.19.0 + github.com/ugorji/go/codec v1.2.12 // indirect + 
go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect +) diff --git a/alfred/internal/clients/alfred_ingestor_client.go b/alfred/internal/clients/alfred_ingestor_client.go new file mode 100644 index 0000000..59a78c3 --- /dev/null +++ b/alfred/internal/clients/alfred_ingestor_client.go @@ -0,0 +1,59 @@ +package clients + +import ( + "alfred/config" + "alfred/model/clients" + "alfred/pkg/log" + "alfred/utils" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" +) + +type AlfredIngestorClient struct { + HttpClient *http.Client +} + +func NewAlfredIngestorClient(httpClient *http.Client) *AlfredIngestorClient { + return &AlfredIngestorClient{ + HttpClient: httpClient, + } +} + +const ( + invalidateCacheUrl = "%s/invalidate/cache?appOs=%s&appVersionName=%s" +) + +func (m *AlfredIngestorClient) InvalidateCache(appVersionName string, clientName string) (*clients.CruiseCacheInvalidateResponse, error) { + if appVersionName == "null" { + return nil, errors.New("unauthorized request") + } + client := m.HttpClient + appOs := config.GetCoreConfig().ClientAppOsMap[clientName] + req, _ := http.NewRequest("POST", fmt.Sprintf(invalidateCacheUrl, config.GetCoreConfig().OutboundServiceConfig.AlfredIngestorUrl, appOs, appVersionName), nil) + apiKey := config.GetCoreConfig().OutboundServiceConfig.AlfredIngestorClientApiKeyMap[clientName] + req.Header.Add(utils.X_API_KEY, apiKey) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var response clients.CruiseCacheInvalidateResponse + err = json.Unmarshal(responseBody, &response) + if err != nil { + return nil, err + } + log.Info(fmt.Sprintf("%v", response)) + if response.StatusCode != 200 { + return nil, 
errors.New("cache invalidation failed in ingestor") + } + return &response, nil +} diff --git a/alfred/internal/clients/customer_federation.go b/alfred/internal/clients/customer_federation.go new file mode 100644 index 0000000..adb7ea8 --- /dev/null +++ b/alfred/internal/clients/customer_federation.go @@ -0,0 +1,47 @@ +package clients + +import ( + "alfred/config" + "alfred/model/clients" + "alfred/utils" + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" +) + +type CustomerFederationClient struct { + HttpClient *http.Client +} + +func (cf *CustomerFederationClient) GetDeviceIdFromCustomerId(customerId string) ([]string, error) { + jsonBody := fmt.Sprintf(utils.Query, customerId) + client := cf.HttpClient + req, _ := http.NewRequest("POST", + fmt.Sprintf(utils.CustomerFederationUrlByReferenceId, config.GetCoreConfig().OutboundServiceConfig.CustomerFederationLayerUrl), + bytes.NewBuffer([]byte(jsonBody))) + req.Header.Add(utils.TenantIdHeadersKey, config.GetCoreConfig().OutboundServiceConfig.CustomerFederationLayerTenantId) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response clients.CustomerFederationResponse + err = json.Unmarshal(responseBody, &response) + if err != nil { + return nil, err + } + var result []string + for _, data := range response.Data.GetCustomer.GetDevice { + dataLocal := data + result = append(result, dataLocal.DeviceId) + } + + return result, nil +} diff --git a/alfred/internal/clients/customer_service.go b/alfred/internal/clients/customer_service.go new file mode 100644 index 0000000..8e00119 --- /dev/null +++ b/alfred/internal/clients/customer_service.go @@ -0,0 +1,107 @@ +package clients + +import ( + "alfred/config" + "alfred/model/clients" + "alfred/utils" + "encoding/json" + "errors" + "fmt" + "github.com/google/uuid" + "io" + "net/http" +) + +type CustomerServiceClient struct { + 
HttpClient *http.Client +} + +func (cs *CustomerServiceClient) GetReferenceIdByPhoneNumber(phoneNumber string) (string, error) { + baseURL := config.GetCoreConfig().OutboundServiceConfig.CustomerProfileServiceUrl + url := fmt.Sprintf(utils.CustomerProfileServiceUrlByPhoneNumber, baseURL, phoneNumber) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", errors.New("failed to create request: " + err.Error()) + } + + newUUID, err := uuid.NewUUID() + if err != nil { + return "", errors.New("failed to generate UUID: " + err.Error()) + } + + req.Header.Add(utils.CoorelationId, newUUID.String()) + req.Header.Add(utils.TeamName, utils.CRMTENANT) + + resp, err := cs.HttpClient.Do(req) + if err != nil { + return "", errors.New("request failed: " + err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", errors.New("unexpected status code: " + string(resp.StatusCode)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", errors.New("failed to read response: " + err.Error()) + } + + var response []clients.CustomerServiceAPIResponse + err = json.Unmarshal(body, &response) + if err != nil { + return "", errors.New("JSON unmarshal failed " + err.Error()) + } + + if len(response) == 0 { + return "", errors.New("customer not found from phone number") + } + + return response[0].CustomerReferenceId, nil +} + +func (cs *CustomerServiceClient) GetCustomerRefId(externalId string) (string, error) { + baseURL := config.GetCoreConfig().OutboundServiceConfig.CustomerProfileServiceUrl + url := fmt.Sprintf(utils.CustomerProfileServiceUrlByExternalId, baseURL, externalId) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", errors.New("failed to create request: " + err.Error()) + } + + newUUID, err := uuid.NewUUID() + if err != nil { + return "", errors.New("failed to generate UUID: " + err.Error()) + } + + req.Header.Add(utils.CoorelationId, newUUID.String()) + 
req.Header.Add(utils.TeamName, utils.CRMTENANT) + + resp, err := cs.HttpClient.Do(req) + if err != nil { + return "", errors.New("request failed: " + err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", errors.New("unexpected status code: " + string(resp.StatusCode)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", errors.New("failed to read response: " + err.Error()) + } + + var response []clients.CustomerServiceAPIResponse + err = json.Unmarshal(body, &response) + if err != nil { + return "", errors.New("JSON unmarshal failed " + err.Error()) + } + + if len(response) == 0 { + return "", errors.New("customer not found from externalId") + } + + return response[0].CustomerReferenceId, nil +} diff --git a/alfred/internal/clients/data_science_client.go b/alfred/internal/clients/data_science_client.go new file mode 100644 index 0000000..d9fad52 --- /dev/null +++ b/alfred/internal/clients/data_science_client.go @@ -0,0 +1,75 @@ +package clients + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/model/clients" + "alfred/pkg/log" + "alfred/utils" + "bytes" + "encoding/json" + "go.uber.org/zap" + "io" + "net/http" + "strconv" +) + +type DataScienceClient struct { + HttpClient *http.Client +} + +// DataScienceMaskingBody is a struct representing your JSON structure +type DataScienceMaskingBody struct { + GetURL string `json:"get_url"` + PutURL string `json:"put_url"` + UseFaceDetection bool `json:"use_face_detection"` +} + +func (ds *DataScienceClient) MaskImages(screen, presignedDownloadUrl, presignedUploadUrl string) (bool, error) { + + reqBody := checkIsDsMaskingEnabled(screen, presignedDownloadUrl, presignedUploadUrl) + reqURL := config.GetCoreConfig().OutboundServiceConfig.DataScienceServiceMaskingBaseUrl + utils.FORWARD_SLASH + utils.DsMaskingUploadUrl + log.Info("Calling data science masking service", zap.String("screenName", screen)) + + response, err := ds.HttpClient.Post(reqURL, 
utils.ApplicationJsonContentType, bytes.NewReader(reqBody)) + + if err != nil || response.StatusCode != http.StatusOK { + log.Error("Error while calling data science masking service", zap.Error(err), zap.Any("response", response)) + metrics.DSApiRequestFailureCounter.WithLabelValues(reqURL, strconv.Itoa(response.StatusCode)).Inc() + return false, err + } + + var responseBody *clients.DataScienceMaskingServiceResponse + body, err := io.ReadAll(response.Body) + if err != nil { + log.Error("Error while reading response body in ds client", zap.Error(err)) + return false, err + } + err = json.Unmarshal(body, &responseBody) + if err != nil || responseBody.S3Response != utils.DsMaskingSuccessResponse { + log.Error("Error in responseBody in data science masking service", zap.Error(err)) + return false, err + } + defer response.Body.Close() + + metrics.DSApiRequestSuccessCounter.WithLabelValues(reqURL, strconv.Itoa(response.StatusCode)).Inc() + return true, nil +} + +func checkIsDsMaskingEnabled(screen, presignedDownloadUrl, presignedUploadUrl string) []byte { + + screenListWithFaceEnabled := config.GetCoreConfig().MaskingConfig.DSMaskingScreensWithFaceDetectionEnabled + isDsMaskingWithFaceEnabled := false + if utils.Contains(screenListWithFaceEnabled, screen) { + isDsMaskingWithFaceEnabled = true + } + + maskReqBody := DataScienceMaskingBody{ + GetURL: presignedDownloadUrl, + PutURL: presignedUploadUrl, + UseFaceDetection: isDsMaskingWithFaceEnabled, + } + + jsonBody, _ := json.Marshal(maskReqBody) + return jsonBody +} diff --git a/alfred/internal/clients/http_config.go b/alfred/internal/clients/http_config.go new file mode 100644 index 0000000..9da23d0 --- /dev/null +++ b/alfred/internal/clients/http_config.go @@ -0,0 +1,23 @@ +package clients + +import ( + "alfred/config" + "net/http" + "time" +) + +type HttpClient struct { + HttpClient *http.Client +} + +func NewHttpClient(httpConfig config.HttpConfig) *HttpClient { + return &HttpClient{ + HttpClient: &http.Client{ + 
Transport: &http.Transport{ + MaxIdleConns: httpConfig.MaxIdleConnectionPool, + MaxConnsPerHost: httpConfig.MaxConnection, + }, + Timeout: time.Duration(httpConfig.MaxTimeoutInSeconds) * time.Second, + }, + } +} diff --git a/alfred/internal/clients/litmus_proxy.go b/alfred/internal/clients/litmus_proxy.go new file mode 100644 index 0000000..c2f8d76 --- /dev/null +++ b/alfred/internal/clients/litmus_proxy.go @@ -0,0 +1,44 @@ +package clients + +import ( + "alfred/config" + "alfred/utils" + "encoding/json" + "fmt" + "io" + "net/http" +) + +type LitmusProxyClient interface { + GetLitmusExperimentOutput(deviceId string, experimentName string) bool +} + +type LitmusProxyClientImpl struct { + HttpClient *http.Client +} + +func (lp *LitmusProxyClientImpl) GetLitmusExperimentOutput(deviceId string, experimentName string) bool { + client := lp.HttpClient + req, _ := http.NewRequest("GET", + fmt.Sprintf(utils.LitmusProxyUrl, config.GetIngesterConfig().OutboundServiceConfig.LitmusServiceUrl, experimentName), + nil) + req.Header.Add(utils.DeviceIdHeadersKey, deviceId) + resp, err := client.Do(req) + if err != nil { + return false + } + responseBody, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + + if err != nil { + return false + } + result := map[string]interface{}{} + if err := json.Unmarshal(responseBody, &result); err != nil { + return false + } + if isAlfredEnabled, ok := result["result"].(bool); ok { + return isAlfredEnabled + } + return false +} diff --git a/alfred/internal/clients/mjolnir.go b/alfred/internal/clients/mjolnir.go new file mode 100644 index 0000000..9810c77 --- /dev/null +++ b/alfred/internal/clients/mjolnir.go @@ -0,0 +1,58 @@ +package clients + +import ( + "alfred/model/clients" + "alfred/pkg/log" + "alfred/utils" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" +) + +type MjolnirClient struct { + HttpClient *http.Client + baseUrl string + realmId string +} + +func NewMjolnirClient(httpClient *http.Client, baseUrl, realmId string) 
*MjolnirClient { + return &MjolnirClient{ + HttpClient: httpClient, + baseUrl: baseUrl, + realmId: realmId, + } +} + +const ( + url = "%s/session/%s" +) + +func (m *MjolnirClient) GetSessionResponse(sessionToken string) (*clients.MjolnirSessionResponse, error) { + if sessionToken == "null" { + return nil, errors.New("unauthorized request") + } + client := m.HttpClient + req, _ := http.NewRequest("GET", fmt.Sprintf(url, m.baseUrl, m.realmId), nil) + req.Header.Add(utils.X_SESSION_TOKEN, sessionToken) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var response clients.MjolnirSessionResponse + err = json.Unmarshal(responseBody, &response) + if err != nil { + return nil, err + } + log.Info(fmt.Sprintf("%v", response)) + + return &response, nil +} diff --git a/alfred/internal/clients/slack_service_client.go b/alfred/internal/clients/slack_service_client.go new file mode 100644 index 0000000..9e31a59 --- /dev/null +++ b/alfred/internal/clients/slack_service_client.go @@ -0,0 +1,72 @@ +package clients + +import ( + "alfred/config" + "alfred/pkg/log" + "bytes" + "encoding/json" + "fmt" + "go.uber.org/zap" + "io" + "net/http" +) + +type SlackServiceClient interface { + SendMessageForDeviceMetrics(messageVariables map[string]interface{}) +} + +type SlackServiceClientImpl struct { + HttpClient *http.Client +} + +type SlackRequest struct { + Data map[string]interface{} `json:"data"` + TemplateId string `json:"templateId"` +} + +type SlackResponse struct { + Status string `json:"status"` + Medium string `json:"medium"` +} + +func (ss *SlackServiceClientImpl) SendMessageForDeviceMetrics(messageVariables map[string]interface{}) { + + requestBody := SlackRequest{ + Data: messageVariables, + TemplateId: config.GetCoreConfig().DeviceMonitoringConfig.SlackTemplateId, + } + jsonRequest, err := json.Marshal(requestBody) + if err != nil { + return + 
} + reader := bytes.NewReader(jsonRequest) + client := ss.HttpClient + req, _ := http.NewRequest("POST", + fmt.Sprintf(config.GetCoreConfig().OutboundServiceConfig.SlackServiceUrl), + reader) + req.Header.Add("Content-Type", "application/json") + resp, err := client.Do(req) + if err != nil { + return + } + responseJson, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + + if err != nil { + log.Error("Response Failed ", zap.Error(err)) + return + } + + var respBody SlackResponse + err = json.Unmarshal(responseJson, &respBody) + if err != nil { + log.Error("Json Unmarshal Failed For Device Metrics") + return + } + + if respBody.Status == "INITIATED" { + log.Info("Message Sent To Slack For Device Metrics") + return + } + +} diff --git a/alfred/internal/infra/cron.go b/alfred/internal/infra/cron.go new file mode 100644 index 0000000..cc11bae --- /dev/null +++ b/alfred/internal/infra/cron.go @@ -0,0 +1,21 @@ +package infra + +import ( + "alfred/pkg/log" + "github.com/robfig/cron/v3" + "go.uber.org/zap" +) + +func ScheduleJob(schedule string, cronJobFunction func()) (cron.EntryID, error) { + c := cron.New() + entryId, err := c.AddFunc(schedule, cronJobFunction) + if err != nil { + return entryId, err + } + + log.Info("scheduled cron job", zap.String("schedule", schedule)) + + c.Run() + + return entryId, nil +} diff --git a/alfred/internal/metrics/.gitignore b/alfred/internal/metrics/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/internal/metrics/app_metric_publisher.go b/alfred/internal/metrics/app_metric_publisher.go new file mode 100644 index 0000000..1f542b7 --- /dev/null +++ b/alfred/internal/metrics/app_metric_publisher.go @@ -0,0 +1,154 @@ +package metrics + +import ( + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/utils" + "go.uber.org/zap" + "strconv" +) + +type AppMetricsPublisher interface { + PublishMetrics(metricAttributes map[string]interface{}, metricType ingester.MetricType, baseAttributes ingester.BaseAttributes) 
+} + +type AppMetricsPublisherImpl struct { +} + +func NewAppMetricPublisher() *AppMetricsPublisherImpl { + return &AppMetricsPublisherImpl{} +} + +func (amp *AppMetricsPublisherImpl) PublishMetrics(metricAttributes map[string]interface{}, metricType ingester.MetricType, baseAttributes ingester.BaseAttributes) { + switch metricType { + case ingester.API_METRICS: + { + if err := publishApiMetric(metricAttributes, baseAttributes); err != nil { + log.Error("error while publishing api metricAttributes", zap.Error(err)) + } + return + } + case ingester.CRASH_METRICS: + { + if err := publishCrashMetric(metricAttributes, baseAttributes); err != nil { + log.Error("error while publishing api metricAttributes", zap.Error(err)) + } + return + } + case ingester.SCREEN_TRANSITION_METRICS: + { + if err := publishScreenTransitionMetric(metricAttributes); err != nil { + log.Error("error while publishing api metricAttributes", zap.Error(err)) + } + return + } + case ingester.APP_METRICS: + { + if err := publishAppPerformanceMetric(metricAttributes, baseAttributes); err != nil { + log.Error("error while publishing api metricAttributes", zap.Error(err)) + } + return + } + default: + { + return + } + } +} + +func publishCrashMetric(metricAttributes map[string]interface{}, baseAttributes ingester.BaseAttributes) (err error) { + var crashMetrics ingester.CrashMetric + if err := utils.Convert[map[string]interface{}, ingester.CrashMetric](metricAttributes, &crashMetrics); err != nil { + return err + } + + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + }() + AppCrashTotal.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Inc() + return +} + +func publishApiMetric(metricAttributes map[string]interface{}, baseAttributes ingester.BaseAttributes) (err error) { + var apiMetrics ingester.ApiMetric + if err := utils.Convert[map[string]interface{}, ingester.ApiMetric](metricAttributes, &apiMetrics); err != nil { + return err + } + + defer func() { + if r 
:= recover(); r != nil { + err = r.(error) + } + }() + ApiRequestCounter.WithLabelValues(apiMetrics.Url, apiMetrics.Method, strconv.Itoa(int(apiMetrics.ResponseCode)), + baseAttributes.AppVersionCode, apiMetrics.ScreenName, baseAttributes.AppOS, apiMetrics.ModuleName).Inc() + ApiRequestLatencySum.WithLabelValues(apiMetrics.Url, apiMetrics.Method, strconv.Itoa(int(apiMetrics.ResponseCode)), + baseAttributes.AppVersionCode, apiMetrics.ScreenName, baseAttributes.AppOS, apiMetrics.ModuleName).Add(float64(apiMetrics.DurationInMs)) + ApiRequestLatencyHistogram.WithLabelValues(apiMetrics.Url, apiMetrics.Method, strconv.Itoa(int(apiMetrics.ResponseCode)), + baseAttributes.AppVersionCode, apiMetrics.ScreenName, baseAttributes.AppOS, apiMetrics.ModuleName).Observe(float64(apiMetrics.DurationInMs)) + apiRequestLatencySummary.WithLabelValues(apiMetrics.Url, apiMetrics.Method, strconv.Itoa(int(apiMetrics.ResponseCode)), + baseAttributes.AppVersionCode, apiMetrics.ScreenName, baseAttributes.AppOS, apiMetrics.ModuleName).Observe(float64(apiMetrics.DurationInMs)) + return +} + +func publishScreenTransitionMetric(metricAttributes map[string]interface{}) (err error) { + var screenTransitionMetrics ingester.ScreenTransitionMetric + if err := utils.Convert[map[string]interface{}, ingester.ScreenTransitionMetric](metricAttributes, &screenTransitionMetrics); err != nil { + return err + } + + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + }() + if screenTransitionMetrics.PreviousScreenName != nil && screenTransitionMetrics.CurrentScreenName != nil && screenTransitionMetrics.LatencyInMs != nil { + ScreenTransitionTotal.WithLabelValues(*screenTransitionMetrics.CurrentScreenName, *screenTransitionMetrics.PreviousScreenName).Inc() + ScreenTransitionLatencySum.WithLabelValues(*screenTransitionMetrics.CurrentScreenName, *screenTransitionMetrics.PreviousScreenName).Add(float64(*screenTransitionMetrics.LatencyInMs)) + 
ScreenTransitionLatencyHistogram.WithLabelValues(*screenTransitionMetrics.CurrentScreenName, *screenTransitionMetrics.PreviousScreenName).Observe(float64(*screenTransitionMetrics.LatencyInMs)) + ScreenTransitionLatencySummary.WithLabelValues(*screenTransitionMetrics.CurrentScreenName, *screenTransitionMetrics.PreviousScreenName).Observe(float64(*screenTransitionMetrics.LatencyInMs)) + } + + if screenTransitionMetrics.CurrentScreenName != nil && screenTransitionMetrics.LoadTimeInMs != nil { + ScreenLoadTotal.WithLabelValues(*screenTransitionMetrics.CurrentScreenName).Inc() + ScreenLoadTimeSum.WithLabelValues(*screenTransitionMetrics.CurrentScreenName).Add(float64(*screenTransitionMetrics.LoadTimeInMs)) + ScreenLoadTimeHistogram.WithLabelValues(*screenTransitionMetrics.CurrentScreenName).Observe(float64(*screenTransitionMetrics.LoadTimeInMs)) + ScreenLoadTimeSummary.WithLabelValues(*screenTransitionMetrics.CurrentScreenName).Observe(float64(*screenTransitionMetrics.LoadTimeInMs)) + } + + return +} + +func publishAppPerformanceMetric(metricAttributes map[string]interface{}, baseAttributes ingester.BaseAttributes) (err error) { + var appPerformanceMetrics ingester.AppPerformanceMetric + if err := utils.Convert[map[string]interface{}, ingester.AppPerformanceMetric](metricAttributes, &appPerformanceMetrics); err != nil { + return err + } + + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + }() + if appPerformanceMetrics.AnrIntervals != nil { + AppAnrTotal.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Add(float64(len(appPerformanceMetrics.AnrIntervals))) + } + + if appPerformanceMetrics.AppStartTime != nil { + AppStartTimeTotal.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Inc() + AppStartTimeSum.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Add(float64(*appPerformanceMetrics.AppStartTime)) + } + + if appPerformanceMetrics.MemoryUsed != nil { + 
AppMemoryUsageTotal.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Inc() + AppMemoryUsageSum.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Add(float64(*appPerformanceMetrics.MemoryUsed)) + } + + if appPerformanceMetrics.AppDiskUsage != nil { + AppDiskUsageTotal.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Inc() + AppDiskUsageCount.WithLabelValues(baseAttributes.AppVersionCode, baseAttributes.AppOS).Add(float64(*appPerformanceMetrics.AppDiskUsage)) + } + return +} diff --git a/alfred/internal/metrics/app_metrics.go b/alfred/internal/metrics/app_metrics.go new file mode 100644 index 0000000..80715ef --- /dev/null +++ b/alfred/internal/metrics/app_metrics.go @@ -0,0 +1,181 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var AppCrashTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_crash_total", + Help: "number of app crashes per os and app version", + }, + []string{"app_version_code", "app_os"}, +) + +var AppAnrTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_anr_total", + Help: "number of app anr per os and app version", + }, + []string{"app_version_code", "app_os"}, +) + +var AppStartTimeTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_start_time_total", + Help: "app start time event counter", + }, + []string{"app_version_code", "app_os", "device_model"}, +) + +var AppStartTimeSum = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_start_time_sum", + Help: "app start time sum", + }, + []string{"app_version_code", "app_os", "device_model"}, +) + +var ApiRequestCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_api_request_total", + Help: "api request counter", + }, + []string{"url", "method", "response_code", "app_version", "screen", "os_name", "biz_vertical"}, +) + +var ApiRequestLatencySum = 
promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_api_request_latency_sum", + Help: "api request latency sum", + }, + []string{"url", "method", "response_code", "app_version", "screen", "os_name", "biz_vertical"}, +) + +var ApiRequestLatencyHistogram = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "app_api_request_latency_histogram", + Help: "api latency histogram", + }, + []string{"url", "method", "response_code", "app_version", "screen", "os_name", "biz_vertical"}, +) + +var apiRequestLatencySummary = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "app_api_request_latency_summary", + Help: "api latency summary", + Objectives: map[float64]float64{ + 0.5: 0.1, + 0.90: 0.05, + 0.95: 0.05, + }, + }, + []string{"url", "method", "response_code", "app_version", "screen", "os_name", "biz_vertical"}, +) + +var ScreenLoadTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_screen_load_total", + Help: "number of screen transition per screen", + }, + []string{"current_screen_name"}, +) + +var ScreenLoadTimeSum = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_screen_load_time_sum", + Help: "screen transition load time sum", + }, + []string{"current_screen_name"}, +) + +var ScreenLoadTimeHistogram = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "app_screen_load_time_histogram", + Help: "screen load time histogram", + }, + []string{"current_screen_name"}, +) + +var ScreenLoadTimeSummary = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "app_screen_load_time_summary", + Help: "screen load time histogram", + Objectives: map[float64]float64{ + 0.5: 0.1, + 0.90: 0.05, + 0.95: 0.05, + }, + }, + []string{"current_screen_name"}, +) + +var ScreenTransitionTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_screen_transition_total", + Help: "number of screen transition per screen", + }, + []string{"current_screen_name", "previous_screen_name"}, +) + +var 
ScreenTransitionLatencySum = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_screen_transition_latency_sum", + Help: "screen transition latency sum", + }, + []string{"current_screen_name", "previous_screen_name"}, +) + +var ScreenTransitionLatencyHistogram = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "app_screen_transition_latency_histogram", + Help: "screen transition latency histogram", + }, + []string{"current_screen_name", "previous_screen_name"}, +) + +var ScreenTransitionLatencySummary = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "app_screen_transition_latency_summary", + Help: "screen transition latency histogram", + Objectives: map[float64]float64{ + 0.5: 0.1, + 0.90: 0.05, + 0.95: 0.05, + }, + }, + []string{"current_screen_name", "previous_screen_name"}, +) + +var AppMemoryUsageSum = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_memory_sum", + Help: "app memory usage sum", + }, + []string{"app_version_code", "app_os"}, +) + +var AppMemoryUsageTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_memory_total", + Help: "app memory usage count", + }, + []string{"app_version_code", "app_os"}, +) + +var AppDiskUsageTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_disk_sum", + Help: "app disk usage sum", + }, + []string{"app_version_code", "app_os"}, +) + +var AppDiskUsageCount = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "app_disk_total", + Help: "app disk event count", + }, + []string{"app_version_code", "app_os"}, +) diff --git a/alfred/internal/metrics/metrics.go b/alfred/internal/metrics/metrics.go new file mode 100644 index 0000000..f15b265 --- /dev/null +++ b/alfred/internal/metrics/metrics.go @@ -0,0 +1,235 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var KafkaEventIngestionEventSuccessCounter = promauto.NewCounterVec( + 
prometheus.CounterOpts{ + Name: "alfred_event_ingestion_success", + Help: "Successfully ingested event to kafka topic", + }, + []string{"topic"}, +) + +var KafkaEventIngestionEventFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_event_ingestion_failure", + Help: "Failed to ingest event to kafka topic", + }, + []string{"topic"}, +) + +var KafkaEventConsumptionEventSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_event_consumption_success", + Help: "Successfully consumed event from kafka topic", + }, + []string{"topic"}, +) + +var KafkaEventConsumptionEventFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_event_consumption_failure", + Help: "Failed to consume event from kafka topic", + }, + []string{"topic"}, +) + +var S3OperationSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_s3_operation_success", + Help: "file s3 operation successful", + }, + []string{"bucket", "operation"}, +) + +var S3OperationFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_s3_operation_failure", + Help: "file s3 operation Failure", + }, + []string{"bucket", "operation"}, +) + +var ElasticSearchIngestionSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_ingestion_success", + Help: "Successfully ingested data to es", + }, + []string{"index"}, +) + +var ElasticSearchIngestionFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_ingestion_failure", + Help: "Failed to ingest data to es", + }, + []string{"index"}, +) + +var ElasticSearchIndexCreationSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_index_creation_success", + Help: "Successfully created index to es", + }, + []string{"index"}, +) + +var ElasticSearchIndexCreationFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: 
"alfred_index_creation_failure", + Help: "Failed to create index to es", + }, + []string{"index"}, +) + +var ElasticSearchFetchSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_fetch_success", + Help: "Successfully ingested data to es", + }, + []string{"index"}, +) + +var ElasticSearchFetchFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_fetch_failure", + Help: "Failed to ingest data to es", + }, + []string{"index"}, +) + +var ElasticSearchUpdateFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_update_failure", + Help: "failed to update data in es", + }, + []string{"index"}, +) + +var ElasticSearchUpdateSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_update_success", + Help: "Successfully updated data in es", + }, + []string{"index"}, +) + +var ElasticSearchDeleteFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_delete_failure", + Help: "failed to delete data in es", + }, + []string{"index"}, +) + +var ElasticSearchDeleteSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_data_delete_success", + Help: "Successfully deleted data in es", + }, + []string{"index"}, +) + +var MediaGenerationSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_media_generation_success", + Help: "Successfully generated media", + }, + []string{"type"}, +) + +var MediaGenerationFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_media_generation_failure", + Help: "Failed to generate media", + }, + []string{"type"}, +) + +var AlfredApiRequestCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_api_request_total", + Help: "api request counter", + }, + []string{"url", "method", "response_code"}, +) + +var AlfredApiRequestLatencySum = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: 
"alfred_api_request_latency_sum", + Help: "api request latency sum", + }, + []string{"url", "method", "response_code"}, +) + +var AlfredApiRequestLatencyHistogram = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "alfred_api_request_latency_histogram", + Help: "api latency histogram", + }, + []string{"url", "method", "response_code"}, +) + +var AlfredKafkaProduceLatencyHistogram = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "alfred_kafka_produce_latency_histogram", + Help: "alfred kafka latency histogram", + }, + []string{"topic", "client_name"}, +) + +var AlfredApiRequestLatencySummary = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "alfred_api_request_latency_summary", + Help: "api latency summary", + Objectives: map[float64]float64{ + 0.5: 0.1, + 0.90: 0.05, + 0.95: 0.05, + }, + }, + []string{"url", "method", "response_code"}, +) + +var ErrorEventsUpdateCronFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_error_events_update_cron_failure", + Help: "Failed to update error events", + }, + []string{"app", "error_message"}, +) + +var DeviceMetricsUpdateSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_device_metrics_update_success", + Help: "Successfully updates device metrics", + }, + []string{"index"}, +) + +var DeviceMetricsUpdateFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_device_metrics_update_failure", + Help: "Failed to update error events", + }, + []string{"index"}, +) + +var DSApiRequestSuccessCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_ds_api_request_success", + Help: "Successfully called DS API", + }, + []string{"url", "response_code"}, +) + +var DSApiRequestFailureCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "alfred_ds_api_request_failure", + Help: "Failed to call DS API", + }, + []string{"url", "response_code"}, +) diff --git 
a/alfred/internal/metrics/server.go b/alfred/internal/metrics/server.go new file mode 100644 index 0000000..25a33b9 --- /dev/null +++ b/alfred/internal/metrics/server.go @@ -0,0 +1,21 @@ +package metrics + +import ( + "alfred/pkg/log" + "fmt" + "github.com/gin-contrib/pprof" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" +) + +func AdminHandler(port int) { + ginServer := gin.New() + + log.Info("starting metrics on ", zap.Int("port", port)) + ginServer.GET("/metrics", gin.WrapH(promhttp.Handler())) + pprof.Register(ginServer) + go func() { + ginServer.Run(fmt.Sprintf(":%v", port)) + }() +} diff --git a/alfred/internal/shedlock/shedlock.go b/alfred/internal/shedlock/shedlock.go new file mode 100644 index 0000000..b3b3aa8 --- /dev/null +++ b/alfred/internal/shedlock/shedlock.go @@ -0,0 +1,48 @@ +package shedlock + +import ( + "alfred/model/common" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "net/http" + "os" + "time" +) + +func Lock(cronName string, lockedUntilTime time.Duration, repositories *repositoryAccessLayer.RepositoryAccessLayer) error { + + shedlockAccessLayer := repositories.ShedlockAccessLayer + + response, status, err := shedlockAccessLayer.GetShedLockStatus(cronName) + if err != nil { + return err + } + + if response.Source.LockedUntil >= utils.GetCurrentTimeInMillis() && response.Source.LockedBy != os.Getenv("HOSTNAME") { + return errors.New("cron Already Is Locked") + } + + req := &common.ShedLock{ + Name: cronName, + LockedAt: utils.GetCurrentTimeInMillis(), + LockedBy: os.Getenv("HOSTNAME"), + LockedUntil: utils.GetCurrentTimeInMillis() + (lockedUntilTime * time.Minute).Milliseconds(), + } + + if status == http.StatusNotFound { + err := shedlockAccessLayer.InsertShedlockForCron(req) + if err != nil { + return err + } + } else { + status, err := shedlockAccessLayer.InsertShedlockForCronWithOptimisticControl(req, &response.PrimaryTerm, &response.SeqNo) + if err != nil { + 
return err + } else if status > 299 { + return errors.New("OCC failed , some pod updated the lock") + } + } + + return nil +} diff --git a/alfred/mapper/ShedlockMapper.go b/alfred/mapper/ShedlockMapper.go new file mode 100644 index 0000000..2eeb6c0 --- /dev/null +++ b/alfred/mapper/ShedlockMapper.go @@ -0,0 +1,24 @@ +package mapper + +import ( + "alfred/model/es" + "alfred/pkg/log" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" +) + +func MapEsapiResponseToShedlockResponse(response *esapi.Response) *es.ShedlockResponse { + + defer response.Body.Close() + + var shedlockResponse es.ShedlockResponse + + err := json.NewDecoder(response.Body).Decode(&shedlockResponse) + if err != nil { + log.Error("Mapping failed for shedlock", zap.Error(err)) + return nil + } + + return &shedlockResponse +} diff --git a/alfred/mapper/commonMapper.go b/alfred/mapper/commonMapper.go new file mode 100644 index 0000000..8a137fa --- /dev/null +++ b/alfred/mapper/commonMapper.go @@ -0,0 +1,46 @@ +package mapper + +import ( + "alfred/model/es" + "alfred/utils" + "encoding/json" + "errors" + "github.com/elastic/go-elasticsearch/v8/esapi" + "reflect" + "strconv" +) + +func MapESApiResponseToESResponse(response *esapi.Response) (*es.ESResponse, error) { + defer response.Body.Close() + var esResponse es.ESResponse + err := json.NewDecoder(response.Body).Decode(&esResponse) + if err != nil { + return nil, err + } + return &esResponse, nil +} + +func MapESResponseToGetUniqueValues(search *esapi.Response, key string) ([]string, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + if len(result.Aggregation.Buckets.Buckets) == 0 { + return nil, errors.New("empty response") + } + + var values []string + for _, event := range result.Aggregation.Buckets.Buckets { + if reflect.TypeOf(event.Key) == reflect.TypeOf("i") && event.Key.(string) != utils.EMPTY { + values = append(values, event.Key.(string)) + } else if 
reflect.TypeOf(event.Key) == reflect.TypeOf(int64(0)) { + str := strconv.FormatInt(event.Key.(int64), 10) + values = append(values, str) + } else if reflect.TypeOf(event.Key) == reflect.TypeOf(float64(0.0)) { + str := strconv.FormatFloat(event.Key.(float64), 'f', 0, 64) + values = append(values, str) + } + } + + return values, nil +} diff --git a/alfred/mapper/cruiseControlMapper.go b/alfred/mapper/cruiseControlMapper.go new file mode 100644 index 0000000..946fe03 --- /dev/null +++ b/alfred/mapper/cruiseControlMapper.go @@ -0,0 +1,68 @@ +package mapper + +import ( + "alfred/model/core/cruise" + "alfred/model/es" + "alfred/model/mapper" + "alfred/utils" + "encoding/json" + "errors" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +type CruiseControlMapper interface { + MapCruiseResponseToList(result es.ESResponse) ([]string, error) + MapCruiseResponseToString(result es.ESResponse) (string, error) + MapEsResponseToCruiseControlConfig(value map[string]interface{}) (cruise.ControlConfig, error) +} + +func MapCruiseResponseToList(search *esapi.Response) ([]string, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + var responseList []string + for _, value := range result.Hits.Hits { + valueLocal := value + responseList = append(responseList, valueLocal.Fields["os_config.app_version"][0].(string)) + } + return responseList, nil +} + +func MapCruiseResponseToString(search *esapi.Response) (string, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return utils.EMPTY, err + } + if result.Hits.Hits == nil { + return utils.EMPTY, errors.New("Mapping Failed") + } + value := result.Hits.Hits[0] + return value.Fields["os_config.app_version"][0].(string), nil +} + +func MapEsResponseToCruiseControlConfig(value map[string]interface{}) (cruise.ControlConfig, error) { + var cruiseControlRequest cruise.ControlConfig + + var cruiseMapper mapper.CruiseMapper + + jsonData, err := 
json.Marshal(value) + if err != nil { + return cruiseControlRequest, err + } + err = json.Unmarshal(jsonData, &cruiseMapper) + if err != nil { + return cruiseControlRequest, err + } + jsonData, err = json.Marshal(cruiseMapper) + if err != nil { + return cruiseControlRequest, err + } + err = json.Unmarshal(jsonData, &cruiseControlRequest) + if err != nil { + return cruiseControlRequest, err + } + + cruiseControlRequest.ConfigTime = utils.GetCurrentTimeInMillis() + return cruiseControlRequest, nil +} diff --git a/alfred/mapper/deviceMapper.go b/alfred/mapper/deviceMapper.go new file mode 100644 index 0000000..3ccb313 --- /dev/null +++ b/alfred/mapper/deviceMapper.go @@ -0,0 +1,28 @@ +package mapper + +import ( + "alfred/model/common" + "alfred/model/core" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +func MapEsApiResponseToDeviceMetricsResponse(search *esapi.Response) ([]core.DeviceMetricsEsResponse, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + if len(result.Hits.Hits) == 0 { + return []core.DeviceMetricsEsResponse{}, &common.InvalidDeviceRequestError{} + } + + var deviceMetrics []core.DeviceMetricsEsResponse + jsonHits, err := json.Marshal(result.Hits.Hits) + if err != nil { + return nil, err + } + if err = json.Unmarshal(jsonHits, &deviceMetrics); err != nil { + return nil, err + } + return deviceMetrics, nil +} diff --git a/alfred/mapper/errorEventsMapper.go b/alfred/mapper/errorEventsMapper.go new file mode 100644 index 0000000..c0f9adf --- /dev/null +++ b/alfred/mapper/errorEventsMapper.go @@ -0,0 +1,60 @@ +package mapper + +import ( + "alfred/model/common" + "alfred/model/es" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +func MapEsResponseAndGetSessionIds(search *esapi.Response) ([]string, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + var sessionIds []string + + if 
len(result.Aggregation.Buckets.Buckets) == 0 { + return sessionIds, nil + } + + for _, event := range result.Aggregation.Buckets.Buckets { + sessionIds = append(sessionIds, event.Key.(string)) + } + return sessionIds, nil +} + +func MapEsResponseToErrorEventsResponse(search *esapi.Response) ([]es.ErrorEventsResponse, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + if len(result.Hits.Hits) == 0 { + return []es.ErrorEventsResponse{}, nil + } + + var errorEvents []es.ErrorEventsResponse + jsonHits, err := json.Marshal(result.Hits.Hits) + if err != nil { + return nil, err + } + if err = json.Unmarshal(jsonHits, &errorEvents); err != nil { + return nil, err + } + return errorEvents, nil +} + +func MapEsResponseToTimeStamp(clientName string, search *esapi.Response) (int64, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return 0, err + } + if len(result.Hits.Hits) == 0 { + return 0, &common.InvalidSessionError{} + } + timestamp, ok := result.Hits.Hits[0].Source[clientName].(float64) + if !ok { + return 0, &common.InvalidSessionError{} + } + return int64(timestamp), nil +} diff --git a/alfred/mapper/eventsMapper.go b/alfred/mapper/eventsMapper.go new file mode 100644 index 0000000..9c628fa --- /dev/null +++ b/alfred/mapper/eventsMapper.go @@ -0,0 +1,49 @@ +package mapper + +import ( + "alfred/model/common" + "alfred/model/es" + "alfred/pkg/log" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" +) + +func MapEsResponseToEventResponse(search *esapi.Response) ([]es.EventResponse, *int64, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, nil, err + } + if len(result.Hits.Hits) == 0 { + return nil, nil, &common.InvalidSessionError{} + } + + var eventResponse []es.EventResponse + jsonHits, err := json.Marshal(result.Hits.Hits) + if err != nil { + return nil, nil, err + } + if err = json.Unmarshal(jsonHits, 
&eventResponse); err != nil { + return nil, nil, err + } + + return eventResponse, &result.Aggregation.Filter.DocCount, nil +} + +func MapEsResponseToZipResponse(search *esapi.Response) ([]string, error) { + defer search.Body.Close() + var esResponse es.EventResponseForZips + err := json.NewDecoder(search.Body).Decode(&esResponse) + if err != nil { + return nil, err + } + + var zips []string + + for _, key := range esResponse.Aggregations.UniqueZipNames.Buckets { + zips = append(zips, key.Key) + log.Info("Zip List:", zap.String("ZipNAme", key.Key)) + } + + return zips, nil +} diff --git a/alfred/mapper/sessionsMapper.go b/alfred/mapper/sessionsMapper.go new file mode 100644 index 0000000..34a0bc5 --- /dev/null +++ b/alfred/mapper/sessionsMapper.go @@ -0,0 +1,117 @@ +package mapper + +import ( + "alfred/config" + "alfred/model/common" + "alfred/model/es" + "alfred/pkg/log" + "alfred/utils" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" +) + +func MapEsResponseToSessionResponse(search *esapi.Response) ([]es.SessionResponse, error) { + result, err := MapESApiResponseToESResponse(search) + if err != nil { + return nil, err + } + if len(result.Hits.Hits) == 0 { + return []es.SessionResponse{}, &common.InvalidSessionError{} + } + + //fetching sessionId, deviceId and event timestamp from es response. 
+	var sessionResponse []es.SessionResponse
+	// Round-trip through JSON to convert the generic hit maps into typed models.
+	jsonHits, err := json.Marshal(result.Hits.Hits)
+	if err != nil {
+		return nil, err
+	}
+	if err = json.Unmarshal(jsonHits, &sessionResponse); err != nil {
+		return nil, err
+	}
+	for idx := range sessionResponse {
+		// Backfill defaults for legacy documents missing these attributes.
+		if sessionResponse[idx].Source.BaseAttributes.ImageType == utils.EMPTY {
+			sessionResponse[idx].Source.BaseAttributes.ImageType = utils.ImageTypeJpeg
+		}
+		if sessionResponse[idx].Source.BaseAttributes.FileTypeExtension == utils.EMPTY {
+			sessionResponse[idx].Source.BaseAttributes.FileTypeExtension = utils.ZipExtension.String()
+		}
+	}
+	return sessionResponse, nil
+}
+
+// MapEsResponseToSessionWithDurationResponse maps ES hits to session models and
+// attaches a per-session duration computed from the min/max timestamp aggregation
+// buckets keyed by session id.
+func MapEsResponseToSessionWithDurationResponse(search *esapi.Response) ([]es.SessionResponse, error) {
+	defer search.Body.Close()
+	var result es.SessionEsResponse
+	err := json.NewDecoder(search.Body).Decode(&result)
+	if err != nil {
+		return nil, err
+	}
+	if len(result.Hits.Hits) == 0 {
+		return []es.SessionResponse{}, &common.InvalidSessionError{}
+	}
+
+	//fetching sessionId, deviceId and event timestamp from es response.
+	var sessionResponse []es.SessionResponse
+	jsonHits, err := json.Marshal(result.Hits.Hits)
+	if err != nil {
+		log.Error("Error while marshalling app session", zap.Error(err))
+		return nil, err
+	}
+	if err = json.Unmarshal(jsonHits, &sessionResponse); err != nil {
+		log.Error("Error while unmarshalling app session", zap.Error(err))
+		return nil, err
+	}
+
+	var finalSessionResponse []es.SessionResponse
+	for _, value := range result.Aggregations.Buckets.Buckets {
+		localValue := value
+		for _, session := range sessionResponse {
+			// Match each session hit to its aggregation bucket by session id;
+			// session is a copy, so mutating it here does not affect sessionResponse.
+			if session.Source.BaseAttributes.SessionId == value.Key {
+				var diff int64
+				if localValue.MaxTimestamp.Value == 0 {
+					// No server-side max timestamp yet: estimate the end using the
+					// client timestamp plus the configured zip-upload allowance.
+					zipUploadTime := config.GetCoreConfig().ZipUploadTimeConfig
+					diff = int64(localValue.MaxClientTimestamp.Value+zipUploadTime-localValue.MinTimestamp.Value) / 1000
+				} else {
+					diff = int64(localValue.MaxTimestamp.Value-localValue.MinTimestamp.Value) / 1000
+				}
+				// NOTE(review): timestamp delta is divided by 1000 — presumably
+				// ms -> s; confirm the unit expected by SessionDuration consumers.
+				session.SessionDuration = &diff
+				finalSessionResponse = append(finalSessionResponse, session)
+			}
+		}
+	}
+
+	return finalSessionResponse, nil
+}
+
+// MapEsResponseToSessionResponseForAllSessions maps ES hits to session models
+// without applying attribute defaults or durations.
+func MapEsResponseToSessionResponseForAllSessions(search *esapi.Response) ([]es.SessionResponse, error) {
+	result, err := MapESApiResponseToESResponse(search)
+	if err != nil {
+		return nil, err
+	}
+	if len(result.Hits.Hits) == 0 {
+		return []es.SessionResponse{}, &common.InvalidSessionError{}
+	}
+
+	//fetching sessionId, deviceId and event timestamp from es response.
+
+	var sessionResponse []es.SessionResponse
+	jsonHits, err := json.Marshal(result.Hits.Hits)
+	if err != nil {
+		return nil, err
+	}
+	if err = json.Unmarshal(jsonHits, &sessionResponse); err != nil {
+		return nil, err
+	}
+	return sessionResponse, nil
+}
+
+// MapEsApiResponseToSessionDeviceAttributesResponse decodes the raw ES body
+// directly into the device-attributes response model.
+func MapEsApiResponseToSessionDeviceAttributesResponse(search *esapi.Response) (*es.SessionEsResponseForDeviceAttributes, error) {
+	defer search.Body.Close()
+	var esResponse es.SessionEsResponseForDeviceAttributes
+	err := json.NewDecoder(search.Body).Decode(&esResponse)
+	if err != nil {
+		return nil, err
+	}
+	return &esResponse, nil
+}
diff --git a/alfred/mapper/webSessionsMapper.go b/alfred/mapper/webSessionsMapper.go
new file mode 100644
index 0000000..a44bd7e
--- /dev/null
+++ b/alfred/mapper/webSessionsMapper.go
@@ -0,0 +1,67 @@
+package mapper
+
+import (
+	"alfred/model/common"
+	"alfred/model/es"
+	"alfred/pkg/log"
+	"encoding/json"
+	"github.com/elastic/go-elasticsearch/v8/esapi"
+	"go.uber.org/zap"
+)
+
+// MapEsResponseToWebSessionResponse maps ES hits to web-session models.
+// NOTE(review): the page parameter is not used in this function.
+func MapEsResponseToWebSessionResponse(search *esapi.Response, page *es.Page) ([]es.WebSessionResponse, error) {
+	result, err := MapESApiResponseToESResponse(search)
+	if err != nil {
+		return nil, err
+	}
+	if len(result.Hits.Hits) == 0 {
+		return []es.WebSessionResponse{}, &common.InvalidSessionError{}
+	}
+
+	var sessionResponse []es.WebSessionResponse
+	jsonHits, err := json.Marshal(result.Hits.Hits)
+	if err != nil {
+		return nil, err
+	}
+	if err = json.Unmarshal(jsonHits, &sessionResponse); err != nil {
+		return nil, err
+	}
+	return sessionResponse, nil
+}
+
+// MapEsResponseToWebSessionResponseWithDuration maps ES hits to web-session
+// models and attaches a duration from the start/end timestamp aggregation
+// buckets keyed by session id.
+// NOTE(review): the page parameter is not used in this function.
+func MapEsResponseToWebSessionResponseWithDuration(search *esapi.Response, page *es.Page) ([]es.WebSessionResponse, error) {
+	defer search.Body.Close()
+	var result es.WebSessionEsResponse
+	err := json.NewDecoder(search.Body).Decode(&result)
+	if err != nil {
+		return nil, err
+	}
+	if len(result.Hits.Hits) == 0 {
+		return []es.WebSessionResponse{}, &common.InvalidSessionError{}
+	}
+
+	var webSessionResponse []es.WebSessionResponse
+	jsonHits, err := json.Marshal(result.Hits.Hits)
+	if err != nil {
+		log.Error("Error while marshalling web session", zap.Error(err))
+		return nil, err
+	}
+	if err = json.Unmarshal(jsonHits, &webSessionResponse); err != nil {
+		log.Error("Error while unmarshalling web session", zap.Error(err))
+		return nil, err
+	}
+
+	var finalWebSessionResponse []es.WebSessionResponse
+	for _, webSession := range webSessionResponse {
+		for _, value := range result.Aggregations.Buckets.Buckets {
+			localValue := value
+			if webSession.Source.WebBaseAttributes.SessionId == value.Key {
+				// NOTE(review): the delta is divided by 1000 yet the target field
+				// is named WebSessionDurationInMillis — verify the intended unit.
+				timeDiff := int64(localValue.MaxEndTimestamp.Value-localValue.MinStartTimestamp.Value) / 1000
+				webSession.WebSessionDurationInMillis = timeDiff
+				finalWebSessionResponse = append(finalWebSessionResponse, webSession)
+			}
+		}
+	}
+
+	return finalWebSessionResponse, nil
+}
diff --git a/alfred/mocks/ConsumerGroupClaimMock.go b/alfred/mocks/ConsumerGroupClaimMock.go
new file mode 100644
index 0000000..c9037a3
--- /dev/null
+++ b/alfred/mocks/ConsumerGroupClaimMock.go
@@ -0,0 +1,31 @@
+package mocks
+
+import (
+	"github.com/Shopify/sarama"
+	"github.com/stretchr/testify/mock"
+)
+
+// ConsumerGroupClaim is a testify mock of sarama.ConsumerGroupClaim; only
+// Messages is recorded, the other methods return fixed zero values.
+type ConsumerGroupClaim struct {
+	mock.Mock
+}
+
+func (c *ConsumerGroupClaim) Topic() string {
+	return ""
+}
+
+func (c *ConsumerGroupClaim) Partition() int32 {
+	return 0
+}
+
+func (c *ConsumerGroupClaim) InitialOffset() int64 {
+	return 0
+}
+
+func (c *ConsumerGroupClaim) HighWaterMarkOffset() int64 {
+	return 0
+}
+
+func (c *ConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage {
+	args := c.Called()
+	return args.Get(0).(<-chan *sarama.ConsumerMessage)
+}
diff --git a/alfred/mocks/ConsumerGroupSessionMock.go b/alfred/mocks/ConsumerGroupSessionMock.go
new file mode 100644
index 0000000..f4342ab
--- /dev/null
+++ b/alfred/mocks/ConsumerGroupSessionMock.go
@@ -0,0 +1,44 @@
+package mocks
+
+import (
+	"context"
+	"github.com/Shopify/sarama"
+	"github.com/stretchr/testify/mock"
+)
+
+type 
ConsumerGroupSession struct { + mock.Mock +} + +func (c *ConsumerGroupSession) Claims() map[string][]int32 { + return nil +} + +func (c *ConsumerGroupSession) MemberID() string { + args := c.Called() + return args.String(0) +} + +func (c *ConsumerGroupSession) GenerationID() int32 { + return 0 +} + +func (c *ConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + +} + +func (c *ConsumerGroupSession) Commit() { + +} + +func (c *ConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + +} + +func (c *ConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + c.Called(msg, metadata) +} + +func (c *ConsumerGroupSession) Context() context.Context { + return nil +} diff --git a/alfred/mocks/MockAppMetricsPublisher.go b/alfred/mocks/MockAppMetricsPublisher.go new file mode 100644 index 0000000..d7b34e9 --- /dev/null +++ b/alfred/mocks/MockAppMetricsPublisher.go @@ -0,0 +1,14 @@ +package mocks + +import ( + "alfred/model/ingester" + "github.com/stretchr/testify/mock" +) + +type MockAppMetricsPublisher struct { + mock.Mock +} + +func (m *MockAppMetricsPublisher) PublishMetrics(metricAttributes map[string]interface{}, metricType ingester.MetricType, baseAttributes ingester.BaseAttributes) { + m.Called(metricAttributes, metricType, baseAttributes) +} diff --git a/alfred/mocks/MockAsyncProducer.go b/alfred/mocks/MockAsyncProducer.go new file mode 100644 index 0000000..098c0df --- /dev/null +++ b/alfred/mocks/MockAsyncProducer.go @@ -0,0 +1,59 @@ +package mocks + +import ( + "github.com/Shopify/sarama" + "github.com/stretchr/testify/mock" +) + +type MockAsyncProducer struct { + mock.Mock +} + +func (m *MockAsyncProducer) AsyncClose() { +} + +func (m *MockAsyncProducer) Close() error { + return nil +} + +func (m *MockAsyncProducer) Input() chan<- *sarama.ProducerMessage { + args := m.Called() + return args.Get(0).(chan<- *sarama.ProducerMessage) +} + +func (m 
*MockAsyncProducer) Successes() <-chan *sarama.ProducerMessage { + return nil +} + +func (m *MockAsyncProducer) Errors() <-chan *sarama.ProducerError { + return nil +} + +func (m *MockAsyncProducer) IsTransactional() bool { + return false +} + +func (m *MockAsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + args := m.Called() + return args.Get(0).(sarama.ProducerTxnStatusFlag) +} + +func (m *MockAsyncProducer) BeginTxn() error { + return nil +} + +func (m *MockAsyncProducer) CommitTxn() error { + return nil +} + +func (m *MockAsyncProducer) AbortTxn() error { + return nil +} + +func (m *MockAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (m *MockAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} diff --git a/alfred/mocks/MockCacheClient.go b/alfred/mocks/MockCacheClient.go new file mode 100644 index 0000000..1e3f3d1 --- /dev/null +++ b/alfred/mocks/MockCacheClient.go @@ -0,0 +1,27 @@ +package mocks + +import ( + "github.com/stretchr/testify/mock" + "time" +) + +type MockCacheClient struct { + mock.Mock +} + +func (m *MockCacheClient) PutWithTtl(key string, value interface{}, time time.Duration) { + m.Called(key, value, time) +} + +func (m *MockCacheClient) PutWithDefaultTtl(key string, value interface{}) { + m.Called(key, value) +} + +func (m *MockCacheClient) Get(key string) (interface{}, bool) { + args := m.Called(key) + return args.Get(0), args.Bool(1) +} + +func (m *MockCacheClient) Delete(key string) { + m.Called(key) +} diff --git a/alfred/mocks/MockClientManager.go b/alfred/mocks/MockClientManager.go new file mode 100644 index 0000000..c84f471 --- /dev/null +++ b/alfred/mocks/MockClientManager.go @@ -0,0 +1,15 @@ +package mocks + +import ( + "alfred/cmd/ingester/app/service/interfaces" + "github.com/stretchr/testify/mock" +) + +type MockClientManager struct { + mock.Mock +} + +func (m 
*MockClientManager) GetClientByType(clientName string) (interfaces.Client, error) { + args := m.Called(clientName) + return args.Get(0).(interfaces.Client), args.Error(1) +} diff --git a/alfred/mocks/MockCommonValidationStrategy.go b/alfred/mocks/MockCommonValidationStrategy.go new file mode 100644 index 0000000..f0847f1 --- /dev/null +++ b/alfred/mocks/MockCommonValidationStrategy.go @@ -0,0 +1,21 @@ +package mocks + +import ( + "alfred/model/ingester" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockCommonValidationStrategy struct { + mock.Mock +} + +func (m *MockCommonValidationStrategy) Validate(c *gin.Context, request ingester.SessionUploadRequest) error { + args := m.Called(c, request) + return args.Error(0) +} + +func (m *MockCommonValidationStrategy) ValidateWebSession(c *gin.Context, request ingester.WebSessionUploadRequest) error { + args := m.Called(c, request) + return args.Error(0) +} diff --git a/alfred/mocks/MockConsumerGroup.go b/alfred/mocks/MockConsumerGroup.go new file mode 100644 index 0000000..71da5e7 --- /dev/null +++ b/alfred/mocks/MockConsumerGroup.go @@ -0,0 +1,38 @@ +package mocks + +import ( + "context" + "github.com/Shopify/sarama" + "github.com/stretchr/testify/mock" +) + +type MockConsumerGroup struct { + mock.Mock +} + +func (m *MockConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { + return nil +} + +func (m *MockConsumerGroup) Errors() <-chan error { + return nil +} + +func (m *MockConsumerGroup) Close() error { + return nil +} + +func (m *MockConsumerGroup) Pause(partitions map[string][]int32) { +} + +func (m *MockConsumerGroup) Resume(partitions map[string][]int32) { + +} + +func (m *MockConsumerGroup) PauseAll() { + +} + +func (m *MockConsumerGroup) ResumeAll() { + +} diff --git a/alfred/mocks/MockCosmosValidationStrategy.go b/alfred/mocks/MockCosmosValidationStrategy.go new file mode 100644 index 0000000..5e394f5 --- /dev/null +++ 
b/alfred/mocks/MockCosmosValidationStrategy.go
@@ -0,0 +1,16 @@
+package mocks
+
+import (
+	"alfred/cmd/ingester/app/model/cosmos"
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/mock"
+)
+
+// MockCosmosValidationStrategy is a testify mock of the cosmos validation strategy.
+type MockCosmosValidationStrategy struct {
+	mock.Mock
+}
+
+func (m *MockCosmosValidationStrategy) Validate(c *gin.Context, request cosmos.SessionUploadRequest) error {
+	args := m.Called(c, request)
+	return args.Error(0)
+}
diff --git a/alfred/mocks/MockCruiseService.go b/alfred/mocks/MockCruiseService.go
new file mode 100644
index 0000000..979211e
--- /dev/null
+++ b/alfred/mocks/MockCruiseService.go
@@ -0,0 +1,46 @@
+package mocks
+
+import (
+	"alfred/model/core/cruise"
+	"alfred/model/es"
+	"alfred/model/ingester"
+	"github.com/stretchr/testify/mock"
+)
+
+// MockCruiseService is a testify mock of the cruise service.
+type MockCruiseService struct {
+	mock.Mock
+}
+
+// FetchCruise records the call and always returns a canned ES response with a
+// fixed os_config (OS version 31) and a 1-fps recordings_config, plus the
+// configured mock error.
+func (m *MockCruiseService) FetchCruise(appVersionName string, osVersion string, appVersionCode string, index string, clientName string) (*es.ESResponse, error) {
+	args := m.Called(appVersionName, osVersion, appVersionCode, index, clientName)
+
+	mockSource := make(map[string]interface{})
+	mockSource["os_config"] = cruise.OsConfig{
+		OsVersion: "31",
+	}
+	fps := int8(1)
+	mockSource["recordings_config"] = make(map[string]interface{})
+	mockSource["recordings_config"].(map[string]interface{})["snapshot_per_second"] = &fps
+
+	mockResp := &es.ESResponse{
+		Hits: es.HitsObject{
+			Hits: []es.Hit{
+				{
+					Source: mockSource,
+				},
+			},
+		},
+	}
+
+	// The previous nil-check on args.Get(0) returned the identical value in
+	// both branches, so it was dead code and has been removed.
+	return mockResp, args.Error(1)
+}
+
+func (m *MockCruiseService) GetFpsForDevice(deviceInfo ingester.DeviceAndNetworkAttributes) int {
+	args := m.Called(deviceInfo)
+	return args.Int(0)
+}
diff --git a/alfred/mocks/MockElasticSearchClient.go b/alfred/mocks/MockElasticSearchClient.go
new file mode 100644
index 0000000..0fffce4
--- /dev/null
+++ b/alfred/mocks/MockElasticSearchClient.go
@@ -0,0 +1,131 @@
+package mocks
+
+import (
+	
"alfred/api/request" + "alfred/model/core/cruise" + "alfred/model/es" + "alfred/model/ingester" + "encoding/json" + "errors" + "github.com/stretchr/testify/mock" +) + +type MockElasticSearchClient struct { + mock.Mock +} + +func (m *MockElasticSearchClient) FetchUniqueFragments(key string, fragmentIngestionIndex string) ([]string, error) { + return nil, nil +} + +func (m *MockElasticSearchClient) FetchEventsWithLabels(labels string, appName, screenName, fragmentName, vertical, appVersion []string, deviceIds []string, startTime, endTime int64, page *es.Page, eventIngestionIndex string, phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string) ([]es.EventResponse, error) { + return nil, nil +} + +func (m *MockElasticSearchClient) CreateFragments(fragmentData ingester.FragmentModel, message string, fragmentIngestionIndex string) error { + return nil +} + +func (m *MockElasticSearchClient) FetchAllEventsFromSession(sessionId string, page *es.Page, eventIngestionIndex string) ([]es.EventResponse, *int64, error) { + return nil, nil, nil +} + +func (m *MockElasticSearchClient) FetchSessionWithDeviceIds(deviceId []string, startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndex string) ([]es.SessionResponse, error) { + //implement when writing tests. + return nil, nil +} + +func (m *MockElasticSearchClient) FetchSessionWithSessionId(sessionIds []string, page *es.Page, sessionUploadIndex string) ([]es.SessionResponse, error) { + //implement when writing tests. 
+ return nil, nil +} + +func (m *MockElasticSearchClient) CreateCruiseControlConfig(cruiseControl *cruise.ControlConfig, cruiseControlIndex string) error { + args := m.Called(cruiseControl, cruiseControlIndex) + return args.Error(0) +} + +func (m *MockElasticSearchClient) FetchPreviousAppVersion(cruiseControlIndex, appOs string) (string, error) { + args := m.Called(cruiseControlIndex) + return args.String(0), args.Error(1) +} + +func (m *MockElasticSearchClient) FetchCruiseControlConfig(appVersion, appOs, cruiseControlIndex string) (*es.ESResponse, error) { + m.Called(appVersion, cruiseControlIndex) + var a *es.ESResponse + if appVersion[len(appVersion)-1] == '1' { + st := "{\n \"took\" : 0,\n \"hits\" : {\n \"total\" : {\n \"value\" : 1,\n \"relation\" : \"eq\"\n },\n \"max_score\" : 3.845883,\n \"hits\" : [\n {\n \"_index\" : \"qa-alfred-cruise-control-index\",\n \"_type\" : \"_doc\",\n \"_id\" : \"4.0.0\",\n \"_score\" : 3.845883,\n \"_source\" : {\n \"enable\" : true,\n \"config_time\" : 1694764705604,\n \"os_config\" : {\n \"app_version_code\" : \"111\",\n \"app_version\" : \"4.0.0\",\n \"os_version\" : \"30\"\n },\n \"type\" : \"android\",\n \"metrics_config\" : {\n \"disable_api_performance\" : false,\n \"disable_remote_logging\" : false,\n \"disable_cpu_monitoring\" : false,\n \"disable_memory_monitoring\" : false,\n \"disable_api_response\" : false,\n \"disable_api_request\" : true,\n \"enabled_api_paths\" : [\n \"LE/requests/Path_Param_Value\",\n \"v2/home\"\n ]\n },\n \"recordings_config\" : {\n \"enable\" : true,\n \"video_quality\" : \"Low\",\n \"video_recording_policy\" : \"All\",\n \"snapshot_per_second\" : 1,\n \"disable_screens\" : [\n \"payment\"\n ],\n \"masked_enabled_screens\" : [\n \"new_home_activity\",\n \"NaviApp_HomePage_Lands\"\n ]\n }\n }\n }\n ]\n }\n}\n" + err := json.Unmarshal([]byte(st), &a) + if err != nil { + panic("stop test") + } + return a, nil + } else if appVersion[len(appVersion)-1] == '2' { + st := "{\n \"took\" : 0,\n 
\"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 1,\n \"successful\" : 1,\n \"skipped\" : 0,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : {\n \"value\" : 0,\n \"relation\" : \"eq\"\n },\n \"max_score\" : null,\n \"hits\" : [ ]\n }\n}" + err := json.Unmarshal([]byte(st), &a) + if err != nil { + panic("stop tests") + } + return a, nil + } else if appVersion[len(appVersion)-1] == 'f' { + return nil, errors.New("mocked error") + } + return a, nil +} + +func (m *MockElasticSearchClient) FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs string) ([]string, error) { + //implement when writing tests. + return nil, nil +} + +func (m *MockElasticSearchClient) CreateEventIngester(message string, eventIngestionIndex string) error { + args := m.Called(message, eventIngestionIndex) + return args.Error(0) +} + +func (m *MockElasticSearchClient) FetchEventsFromSessionId(sessionId string, page *es.Page, eventIngestionIndex string) ([]es.EventResponse, *int64, error) { + //implement when writing tests. + return nil, nil, nil +} + +func (m *MockElasticSearchClient) CreateSessionUpload(alfredSessionRecordingEvent ingester.SessionUploadRequest, message string, sessionUploadIndex string) error { + + args := m.Called(alfredSessionRecordingEvent, message, sessionUploadIndex) + return args.Error(0) +} + +func (m *MockElasticSearchClient) FetchSession(startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndex string) ([]es.SessionResponse, error) { + //implement when writing tests. + return nil, nil +} + +func (m *MockElasticSearchClient) FetchSessionAndSessionDurationWithSessionIds(sessionIds []string, sessionUploadIndex string) ([]es.SessionResponse, error) { + //implement when writing tests. 
+ return nil, nil +} + +func (m *MockElasticSearchClient) CreateIndex(baseIndex, indexSuffix string, mapping string) { + + m.Called(baseIndex, indexSuffix, mapping) +} + +func (m *MockElasticSearchClient) FetchUniqueKeys(key string, eventIngestionIndex string) ([]string, error) { + + return nil, nil +} + +func (m *MockElasticSearchClient) UploadWebSession(sessionUploadRequest ingester.WebSessionUploadRequest) error { + + args := m.Called(sessionUploadRequest) + return args.Error(0) +} + +func (m *MockElasticSearchClient) FetchWebSessionWithSessionId(sessionId string) ([]es.WebSessionResponse, error) { + //implement when writing tests. + return nil, nil +} + +func (m *MockElasticSearchClient) FetchAllWebSession(filters request.WebSessionFilters, page *es.Page) ([]es.WebSessionResponse, error) { + //implement when writing tests. + return nil, nil +} diff --git a/alfred/mocks/MockHeadersValidator.go b/alfred/mocks/MockHeadersValidator.go new file mode 100644 index 0000000..6fc404e --- /dev/null +++ b/alfred/mocks/MockHeadersValidator.go @@ -0,0 +1,15 @@ +package mocks + +import ( + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockHeadersValidator struct { + mock.Mock +} + +func (m *MockHeadersValidator) ValidateHeaders(c *gin.Context) (string, error) { + args := m.Called(c) + return args.String(0), args.Error(1) +} diff --git a/alfred/mocks/MockIngesterClient.go b/alfred/mocks/MockIngesterClient.go new file mode 100644 index 0000000..ca6b018 --- /dev/null +++ b/alfred/mocks/MockIngesterClient.go @@ -0,0 +1,22 @@ +package mocks + +import ( + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockIngesterClient struct { + mock.Mock +} + +func (m *MockIngesterClient) IngestSession(c *gin.Context) { + m.Called(c) +} + +func (m *MockIngesterClient) IngestEvent(c *gin.Context) { + m.Called(c) +} + +func (m *MockIngesterClient) IngestAppMetrics(c *gin.Context) { + m.Called(c) +} diff --git a/alfred/mocks/MockKProducer.go 
b/alfred/mocks/MockKProducer.go new file mode 100644 index 0000000..7a8e3f4 --- /dev/null +++ b/alfred/mocks/MockKProducer.go @@ -0,0 +1,18 @@ +package mocks + +import "github.com/stretchr/testify/mock" + +type MockKProducer struct { + mock.Mock +} + +func (m *MockKProducer) Errors() { + +} +func (m *MockKProducer) Successes() { + +} +func (m *MockKProducer) SendMessage(mappedRequest interface{}, topic, key, clientName string) error { + args := m.Called(mappedRequest, topic, key, clientName) + return args.Error(0) +} diff --git a/alfred/mocks/MockLitmusProxy.go b/alfred/mocks/MockLitmusProxy.go new file mode 100644 index 0000000..f1810e9 --- /dev/null +++ b/alfred/mocks/MockLitmusProxy.go @@ -0,0 +1,13 @@ +package mocks + +import "github.com/stretchr/testify/mock" + +type MockLitmusProxy struct { + mock.Mock +} + +func (m MockLitmusProxy) GetLitmusExperimentOutput(deviceId string) bool { + + args := m.Called(deviceId) + return args.Bool(0) +} diff --git a/alfred/mocks/MockS3Client.go b/alfred/mocks/MockS3Client.go new file mode 100644 index 0000000..8fae5ec --- /dev/null +++ b/alfred/mocks/MockS3Client.go @@ -0,0 +1,42 @@ +package mocks + +import ( + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/stretchr/testify/mock" +) + +type MockS3Client struct { + mock.Mock +} + +func (m *MockS3Client) PresignedDownloadUrl(bucket, key, filename string) (string, error) { + args := m.Called(bucket, key, filename) + return args.String(0), args.Error(1) +} + +func (m *MockS3Client) UploadFile(bucketName, pathToFile, fileName, targetFileName string) (*s3.PutObjectOutput, error) { + return nil, nil +} + +func (m *MockS3Client) DownloadAndUnzipFile(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName string) (string, error) { + + args := m.Called(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName) + return args.String(0), args.Error(1) +} + +func (m *MockS3Client) DownloadFile(bucketName, targetFolder, inputFileName, 
targetFileName string) error { + + args := m.Called(bucketName, targetFolder, inputFileName, targetFileName) + return args.Error(0) +} + +func (m *MockS3Client) CheckIfPresent(bucketName, fileName string) (bool, error) { + + args := m.Called(bucketName, fileName) + return args.Bool(0), args.Error(1) +} + +func (m *MockS3Client) PreSignedUploadUrl(bucket, sessionId, extension, contentType string) (string, error) { + args := m.Called(bucket, sessionId, extension, contentType) + return args.String(0), args.Error(1) +} diff --git a/alfred/mocks/MockWebClient.go b/alfred/mocks/MockWebClient.go new file mode 100644 index 0000000..fb249fe --- /dev/null +++ b/alfred/mocks/MockWebClient.go @@ -0,0 +1,14 @@ +package mocks + +import ( + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockWebClient struct { + mock.Mock +} + +func (m *MockWebClient) IngestWebSession(c *gin.Context) { + m.Called(c) +} diff --git a/alfred/mocks/MockWebClientFactory.go b/alfred/mocks/MockWebClientFactory.go new file mode 100644 index 0000000..dccae6d --- /dev/null +++ b/alfred/mocks/MockWebClientFactory.go @@ -0,0 +1,20 @@ +package mocks + +import ( + "alfred/cmd/ingester/app/service/interfaces" + kafka "alfred/pkg/kafka/produce" + "github.com/stretchr/testify/mock" +) + +type MockWebClientFactory struct { + mock.Mock +} + +func (m *MockWebClientFactory) Initialize(producer kafka.KProducer) { + m.Called(producer) +} + +func (m *MockWebClientFactory) CreateWebClient(client string) (interfaces.WebClient, error) { + args := m.Called(client) + return args.Get(0).(interfaces.WebClient), args.Error(1) +} diff --git a/alfred/mocks/MockWebClientManager.go b/alfred/mocks/MockWebClientManager.go new file mode 100644 index 0000000..fe138ef --- /dev/null +++ b/alfred/mocks/MockWebClientManager.go @@ -0,0 +1,15 @@ +package mocks + +import ( + "alfred/cmd/ingester/app/service/interfaces" + "github.com/stretchr/testify/mock" +) + +type MockWebClientManager struct { + mock.Mock +} + 
+func (m *MockWebClientManager) GetWebClientByType(clientName string) (interfaces.WebClient, error) { + args := m.Called(clientName) + return args.Get(0).(interfaces.WebClient), args.Error(1) +} diff --git a/alfred/mocks/MockWebClientValidator.go b/alfred/mocks/MockWebClientValidator.go new file mode 100644 index 0000000..c3234e9 --- /dev/null +++ b/alfred/mocks/MockWebClientValidator.go @@ -0,0 +1,15 @@ +package mocks + +import ( + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockWebClientValidator struct { + mock.Mock +} + +func (m *MockWebClientValidator) ValidateWebClientUsingProjectName(c *gin.Context) (string, error) { + args := m.Called(c) + return args.String(0), args.Error(1) +} diff --git a/alfred/model/.gitignore b/alfred/model/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/model/clients/response_model.go b/alfred/model/clients/response_model.go new file mode 100644 index 0000000..929eb41 --- /dev/null +++ b/alfred/model/clients/response_model.go @@ -0,0 +1,60 @@ +package clients + +type DataResponse struct { + DeviceId string `json:"deviceId"` +} + +type CustomerFederationResponse struct { + Data struct { + GetCustomer struct { + GetDevice []DataResponse `json:"getDevice,omitempty"` + } `json:"getCustomer,omitempty"` + } `json:"data,omitempty"` +} + +type CustomerServiceResponse struct { + Data struct { + NaviReferenceId string `json:"customerReferenceId"` + PhoneNumber string `json:"phoneNumber"` + ExternalId string `json:"externalId"` + } `json:"data"` + StatusCode int `json:"statusCode"` +} + +type CustomerServiceAPIResponse struct { + CustomerReferenceId string `json:"customerReferenceId"` + ExternalId string `json:"externalId"` + Name string `json:"name"` + PhoneNumber string `json:"phoneNumber"` + Dob string `json:"dob"` + Pan string `json:"pan"` + EntityType string `json:"entityType"` +} + +type MjolnirError struct { + Code string `json:"code,omitempty"` + Message string 
`json:"message,omitempty"` + Params interface{} `json:"params,omitempty"` +} + +type MjolnirSessionResponse struct { + SessionToken string `json:"sessionToken,omitempty"` + ClientId string `json:"clientId,omitempty"` + EmailId string `json:"emailId,omitempty"` + AccountId string `json:"accountId,omitempty"` + PhoneNumber string `json:"phoneNumber,omitempty"` + PreferredUsername string `json:"preferred_username,omitempty"` + Roles []string `json:"roles,omitempty"` + Groups []string `json:"groups,omitempty"` + Permissions []string `json:"permissions,omitempty"` + StatusCode int `json:"statusCode,omitempty"` + Errors []MjolnirError `json:"errors"` +} + +type CruiseCacheInvalidateResponse struct { + StatusCode int `json:"status,omitempty"` +} + +type DataScienceMaskingServiceResponse struct { + S3Response string `json:"s3_response,omitempty"` +} diff --git a/alfred/model/common/errors.go b/alfred/model/common/errors.go new file mode 100644 index 0000000..4728152 --- /dev/null +++ b/alfred/model/common/errors.go @@ -0,0 +1,20 @@ +package common + +type InvalidSessionError struct{} + +func (s *InvalidSessionError) Error() string { + return "no session found" +} + +type InvalidRequestError struct{} + +func (s *InvalidRequestError) Error() string { + return "invalid request" +} + +type InvalidDeviceRequestError struct { +} + +func (s *InvalidDeviceRequestError) Error() string { + return "no device attribute found" +} diff --git a/alfred/model/common/generic_response.go b/alfred/model/common/generic_response.go new file mode 100644 index 0000000..57de749 --- /dev/null +++ b/alfred/model/common/generic_response.go @@ -0,0 +1,34 @@ +package common + +type Error struct { + Message string `json:"message,omitempty"` + Metadata interface{} `json:"metadata,omitempty"` +} + +type Response struct { + Data interface{} `json:"data"` + Error *Error `json:"error,omitempty"` + Status int `json:"status,omitempty"` +} + +type Page struct { + TotalElements int `json:"totalElements"` + 
TotalPages int64 `json:"totalPages"` + PageSize int `json:"pageSize"` + SortDirection SortDirection `json:"sort,omitempty"` + PageNumber int64 `json:"pageNumber"` +} + +// PaginatedResponse can be used for paginated response +type PaginatedResponse struct { + Data interface{} `json:"data"` + Page Page `json:"pages,omitempty"` + Status int `json:"status,omitempty"` +} + +type SortDirection string + +const ( + DESC SortDirection = "desc" + ASC SortDirection = "asc" +) diff --git a/alfred/model/common/shedlock_model.go b/alfred/model/common/shedlock_model.go new file mode 100644 index 0000000..5e22d6e --- /dev/null +++ b/alfred/model/common/shedlock_model.go @@ -0,0 +1,8 @@ +package common + +type ShedLock struct { + Name string `json:"name"` + LockedAt int64 `json:"locked_at"` + LockedBy string `json:"locked_by"` + LockedUntil int64 `json:"locked_until"` +} diff --git a/alfred/model/core/EventsAndCoordinatesMap.go b/alfred/model/core/EventsAndCoordinatesMap.go new file mode 100644 index 0000000..da8d159 --- /dev/null +++ b/alfred/model/core/EventsAndCoordinatesMap.go @@ -0,0 +1,9 @@ +package core + +import "alfred/model/ingester" + +type EventsAndCoordinatesMapping struct { + XCoordinate int `json:"zip_processing_status,omitempty"` + YCoordinate int `json:"processed_zip_name"` + EventType ingester.EventType `json:"fragment_order,omitempty"` +} diff --git a/alfred/model/core/cruise/model.go b/alfred/model/core/cruise/model.go new file mode 100644 index 0000000..970c650 --- /dev/null +++ b/alfred/model/core/cruise/model.go @@ -0,0 +1,61 @@ +package cruise + +type VideoQuality string +type VideoRecordingPolicy string +type OsType string + +const ( + LOW VideoQuality = "LOW" + MEDIUM VideoQuality = "MEDIUM" + HIGH VideoQuality = "HIGH" +) + +const ( + All VideoRecordingPolicy = "All" + Sampling VideoRecordingPolicy = "Sampling" +) + +const ( + ANDROID OsType = "android" + IOS OsType = "ios" +) + +type ControlConfig struct { + Enable *bool `json:"enable,omitempty"` + 
ConfigTime int64 `json:"config_time,omitempty"` + OsConfig OsConfig `json:"os_config,omitempty"` + Type OsType `json:"type,omitempty"` + MetricsConfig MetricsConfig `json:"metrics_config,omitempty"` + RecordingsConfig RecordingsConfig `json:"recordings_config,omitempty"` +} + +type MetricsConfig struct { + Enable *bool `json:"enable,omitempty"` + DisableAPIPerformance *bool `json:"disable_api_performance,omitempty"` + DisableRemoteLogging *bool `json:"disable_remote_logging,omitempty"` + DisableCPUMonitoring *bool `json:"disable_cpu_monitoring,omitempty"` + DisableMemoryMonitoring *bool `json:"disable_memory_monitoring,omitempty"` + DisableDeviceModels []string `json:"disable_device_models,omitempty"` + DisableApiResponse *bool `json:"disable_api_response,omitempty"` + DisableApiRequest *bool `json:"disable_api_request,omitempty"` + EnabledApiPaths []string `json:"enabled_api_paths,omitempty"` +} + +type RecordingsConfig struct { + Enable *bool `json:"enable,omitempty"` + VideoQuality VideoQuality `json:"video_quality,omitempty"` + VideoRecordingPolicy VideoRecordingPolicy `json:"video_recording_policy,omitempty"` + SnapshotPerSecond *int8 `json:"snapshot_per_second,omitempty"` + DisableCrashRecording *bool `json:"disable_crash_recording,omitempty"` + DisableANRRecording *bool `json:"disable_anr_recording,omitempty"` + DisableScreens []string `json:"disable_screens,omitempty"` + DisableDeviceModels []string `json:"disable_device_models,omitempty"` + DisableModules []string `json:"disable_modules,omitempty"` + MaskedEnabledScreens []string `json:"masked_enabled_screens,omitempty"` +} + +type OsConfig struct { + AppVersionCode string `json:"app_version_code"` + AppVersion string `json:"app_version"` + OsVersion string `json:"os_version"` +} diff --git a/alfred/model/core/device_metrics.go b/alfred/model/core/device_metrics.go new file mode 100644 index 0000000..2c90557 --- /dev/null +++ b/alfred/model/core/device_metrics.go @@ -0,0 +1,20 @@ +package core + +type 
DeviceMetricsEsResponse struct { + Source struct { + DeviceMetrics DeviceMetrics `json:"device_metrics"` + } `json:"_source"` +} + +type DeviceMetricsModel struct { + DeviceMetrics DeviceMetrics `json:"device_metrics"` +} + +type DeviceMetrics struct { + SnapshotPerSecond int64 `json:"snapshot_per_second"` + CreatedAt int64 `json:"created_at"` + ClientName string `json:"client"` + BatteryDropMap map[string]float64 `json:"battery_drop_map"` + MemoryUsageMap map[string]float64 `json:"memory_usage_map"` + TimeDurationMap map[string]float64 `json:"time_duration_map"` +} diff --git a/alfred/model/core/video_fragment_status.go b/alfred/model/core/video_fragment_status.go new file mode 100644 index 0000000..e71a4ee --- /dev/null +++ b/alfred/model/core/video_fragment_status.go @@ -0,0 +1,21 @@ +package core + +type VideoFragmentStatusModel struct { + SessionId string `json:"session_id,omitempty"` + FinalStatus string `json:"video_generation_status,omitempty"` + VideoName string `json:"video_name,omitempty"` + VideoUrl string `json:"video_url,omitempty"` + RequestedAt int64 `json:"requested_at"` + RequestedBy string `json:"requested_by"` + TotalFragments int64 `json:"total_fragments,omitempty"` + VideoGeneratedTillNow int `json:"video_generated_till_now"` + FragmentsCompletedTillNow int `json:"fragments_completed_till_now"` + VideoFragmentStatusAttributes []VideoFragmentStatusAttributes `json:"video_fragment_statuses,omitempty"` +} + +type VideoFragmentStatusAttributes struct { + ZipProcessingStatus string `json:"zip_processing_status,omitempty"` + ProcessedZipName string `json:"processed_zip_name"` + FragmentOrder int `json:"fragment_order,omitempty"` + EventId string `json:"event_id,omitempty"` +} diff --git a/alfred/model/core/web_event_json_model.go b/alfred/model/core/web_event_json_model.go new file mode 100644 index 0000000..c7c3b00 --- /dev/null +++ b/alfred/model/core/web_event_json_model.go @@ -0,0 +1,6 @@ +package core + +type WebEventJsonModel struct { + 
EventTimestamp int64 `json:"event_timestamp"` + DomEventsData []string `json:"dom_events_data"` +} diff --git a/alfred/model/es/es.go b/alfred/model/es/es.go new file mode 100644 index 0000000..4fcfba1 --- /dev/null +++ b/alfred/model/es/es.go @@ -0,0 +1,220 @@ +package es + +import ( + "alfred/model/ferret" + "alfred/model/ingester" +) + +// Hit is +type Hit struct { + ID string `json:"_id"` + Index string `json:"_index"` + Source map[string]interface{} `json:"_source"` + Fields map[string][]interface{} `json:"fields"` +} + +// HitsObject is a part of ES query response +type HitsObject struct { + Total Total `json:"total"` + Hits []Hit `json:"hits,omitempty"` +} + +type BucketData struct { + Key interface{} `json:"key,omitempty"` +} + +type FilterData struct { + DocCount int64 `json:"doc_count,omitempty"` +} + +type AggrBuckets struct { + Buckets []BucketData `json:"buckets,omitempty"` +} + +type Aggregation struct { + Total Total `json:"total"` + Buckets AggrBuckets `json:"buckets,omitempty"` + Filter FilterData `json:"filter_data,omitempty"` +} + +// ESResponse is ES search query response +type ESResponse struct { + Took float32 `json:"took"` + Hits HitsObject `json:"hits"` + Aggregation Aggregation `json:"aggregations,omitempty"` +} + +type SessionEsResponse struct { + Took float32 `json:"took"` + Hits HitsObject `json:"hits"` + Aggregations struct { + Total Total `json:"total"` + Buckets struct { + Buckets []struct { + Key string `json:"key"` + MinTimestamp struct { + Value float64 `json:"value"` + } `json:"min_timestamp"` + MaxTimestamp struct { + Value float64 `json:"value"` + } `json:"max_timestamp"` + MaxClientTimestamp struct { + Value float64 `json:"value"` + } `json:"max_client_timestamp"` + } `json:"buckets"` + } `json:"buckets"` + } `json:"aggregations"` +} + +type WebSessionEsResponse struct { + Took float32 `json:"took"` + Hits HitsObject `json:"hits"` + Aggregations struct { + Buckets struct { + Buckets []struct { + Key string `json:"key"` + 
MinStartTimestamp struct { + Value float64 `json:"value"` + } `json:"min_start_timestamp"` + MaxEndTimestamp struct { + Value float64 `json:"value"` + } `json:"max_end_timestamp"` + } `json:"buckets"` + } `json:"buckets"` + } `json:"aggregations"` +} + +type SessionEsResponseForDeviceAttributes struct { + Took float32 `json:"took"` + Aggregations struct { + Filter struct { + Buckets struct { + Buckets []struct { + Key string `json:"key"` + MaxBeginningBattery struct { + Value float64 `json:"value"` + } `json:"max_beginning_battery"` + MinEndBattery struct { + Value float64 `json:"value"` + } `json:"min_end_battery"` + MinEndMemory struct { + Value float64 `json:"value"` + } `json:"min_end_memory"` + MaxEndMemory struct { + Value float64 `json:"value"` + } `json:"max_end_memory"` + MinClientTimestamp struct { + Value float64 `json:"value"` + } `json:"min_client_timestamp"` + MaxEventEndTimestamp struct { + Value float64 `json:"value"` + } `json:"max_event_end_timestamp"` + } `json:"buckets"` + } `json:"buckets"` + } `json:"filter"` + } `json:"aggregations"` +} + +// Total is +type Total struct { + Value int `json:"value"` +} + +// SessionResponse is es response of session upload +type SessionResponse struct { + Index string `json:"_index"` + Source struct { + ingester.BaseAttributes `json:"base_attributes"` + ingester.SessionUploadEventAttributes `json:"session_upload_event_attributes"` + CreatedAt int64 `json:"created_at"` + } `json:"_source"` + SessionDuration *int64 `json:"session_duration,omitempty"` +} + +type VideoGenerationStatusResponse struct { + ID string `json:"_id"` + Source struct { + FragmentsCompletedTillNow int `json:"fragments_completed_till_now"` + SessionID string `json:"session_id,omitempty"` + TotalFragments int `json:"total_fragments,omitempty"` + RequestedAt int64 `json:"requested_at,omitempty"` + RequestedBy string `json:"requested_by,omitempty"` + VideoGeneratedTillNow int `json:"video_generated_till_now"` + VideoFragmentStatuses []struct { 
+ EventID string `json:"event_id,omitempty"` + ProcessedZipName string `json:"processed_zip_name"` + FragmentOrder int `json:"fragment_order"` + ZipProcessingStatus string `json:"zip_processing_status"` + } `json:"video_fragment_statuses"` + VideoGenerationStatus string `json:"video_generation_status,omitempty"` + VideoName string `json:"video_name,omitempty"` + VideoUrl string `json:"video_url,omitempty"` + } `json:"_source"` +} + +// EventResponse is es response of session upload +type EventResponse struct { + Source struct { + ingester.BaseAttributes `json:"base_attributes"` + ingester.EventAttributes `json:"events"` + } `json:"_source"` +} + +type Page struct { + PageSize int64 `json:"pageSize"` + PageNumber int64 `json:"pageNumber"` + TotalSize int `json:"totalSize"` + SortDirection SortDirection `json:"sort"` +} + +type SortDirection string + +const ( + DESC SortDirection = "desc" + ASC SortDirection = "asc" +) + +// WebSessionResponse is es response of session upload +type WebSessionResponse struct { + Source struct { + ingester.WebBaseAttributes `json:"base_attributes"` + ingester.WebSessionAttributes `json:"session_attribute"` + } `json:"_source"` + WebSessionDurationInMillis int64 `json:"web_session_duration,omitempty"` +} + +type ErrorEventsResponse struct { + DocId string `json:"_id"` + Index string `json:"_index"` + Source struct { + ferret.ErrorAttribute `json:"error_attributes"` + ferret.ErrorEvent `json:"error_event"` + } `json:"_source"` +} + +type EventResponseForZips struct { + Took float32 `json:"took"` + Hits HitsObject `json:"hits"` + Aggregations struct { + UniqueZipNames struct { + Buckets []struct { + Key string `json:"key"` + DocCount float64 `json:"doc_count"` + MinEventTimestamp struct { + Value float64 `json:"value"` + } `json:"min_event_timestamp"` + } `json:"buckets"` + } `json:"unique_zipNames"` + } `json:"aggregations"` +} + +type ShedlockResponse struct { + SeqNo int `json:"_seq_no"` + PrimaryTerm int `json:"_primary_term"` + 
Source struct { + Name string `json:"name"` + LockedAt int64 `json:"locked_at"` + LockedBy string `json:"locked_by"` + LockedUntil int64 `json:"locked_until"` + } `json:"_source"` +} diff --git a/alfred/model/ferret/error_model.go b/alfred/model/ferret/error_model.go new file mode 100644 index 0000000..0bdad08 --- /dev/null +++ b/alfred/model/ferret/error_model.go @@ -0,0 +1,76 @@ +package ferret + +import "alfred/utils" + +type ErrorEventsAttributes struct { + ErrorAttribute ErrorAttribute `json:"failure_attributes"` + ErrorEvents []ErrorEvent `json:"failures"` +} + +type ErrorEventAttribute struct { + ErrorAttribute ErrorAttribute `json:"error_attributes,omitempty"` + ErrorEvent ErrorEvent `json:"error_event,omitempty"` + CreatedAt int64 `json:"created_at,omitempty"` +} + +type ErrorAttribute struct { + SessionId string `json:"session_id,omitempty"` + ClientName ClientName `json:"client_name,omitempty"` + DeviceId string `json:"device_id"` + CustomerId string `json:"customer_id,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + AppVersionCode string `json:"app_version_code,omitempty"` + AppVersionName string `json:"app_version_name,omitempty"` +} + +type ErrorEvent struct { + ErrorTimestamp int64 `json:"error_timestamp"` + ZipNames []string `json:"zip_name,omitempty"` + ErrorType ErrorType `json:"error_type"` + RequestURL RequestUrl `json:"request_url"` + RequestMethod RequestMethod `json:"request_method"` + ClientTs int64 `json:"client_ts,omitempty"` + ErrorName ErrorName `json:"error_name"` + ErrorStatusCode int `json:"error_status_code"` + ErrorMessage string `json:"error_message,omitempty"` + IsActive bool `json:"is_active,omitempty"` + NetworkStrengthInKbps float64 `json:"network_strength,omitempty"` + SessionId string `json:"session_id,omitempty"` + EventIdList []string `json:"event_id_list,omitempty"` +} + +type ErrorType string + +const ( + ZIP_UPLOAD_FAILURE ErrorType = "ZIP_UPLOAD_FAILURE" + API_FAILURE ErrorType = "API_FAILURE" + 
ES_UPLOAD_FAILURE ErrorType = "ES_UPLOAD_FAILURE" +) + +type ErrorName string + +const ( + FETCH_CRUISE ErrorName = "FETCH_CRUISE" + SESSION_UPLOAD ErrorName = "SESSION_UPLOAD" + EVENTS_UPLOAD ErrorName = "EVENTS_UPLOAD" + ES_UPLOAD ErrorName = "ES_UPLOAD" + METRICS_UPLOAD ErrorName = "METRICS_UPLOAD" + FETCH_PRESIGNED_URL ErrorName = "FETCH_PRESIGNED_URL" + ZIP_UPLOAD ErrorName = "ZIP_UPLOAD" +) + +type RequestMethod string + +const ( + GET RequestMethod = "GET" + POST RequestMethod = "POST" +) + +type ClientName string + +const ( + NAVI_USER_APP ClientName = utils.NAVI_USER_APP + COSMOS_APP ClientName = utils.COSMOS +) + +type RequestUrl string diff --git a/alfred/model/ingester/app_event.go b/alfred/model/ingester/app_event.go new file mode 100644 index 0000000..0b51ef0 --- /dev/null +++ b/alfred/model/ingester/app_event.go @@ -0,0 +1,63 @@ +package ingester + +type EventType string + +const ( + TOUCH_EVENT EventType = "TOUCH_EVENT" + SCROLL_EVENT EventType = "SCROLL_EVENT" + INFO_LOG EventType = "INFO_LOG" + WARN_LOG EventType = "WARN_LOG" + ERROR_LOG = "ERROR_LOG" + SESSION_UPLOAD_EVENT EventType = "SESSION_UPLOAD_EVENT" + CRASH_ANALYTICS_EVENT = "CRASH_ANALYTICS_EVENT" + ANR_EVENT = "ANR_EVENT" + START_RECORDING_EVENT EventType = "START_RECORDING_EVENT" + STOP_RECORDING_EVENT EventType = "STOP_RECORDING_EVENT" + SCREEN_TRANSITION_EVENT EventType = "SCREEN_TRANSITION_EVENT" +) + +type AppEvent struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + Events []EventAttributes `json:"events,omitempty"` +} + +type SessionUploadRequest struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + SessionUploadEventAttributes SessionUploadEventAttributes `json:"session_upload_event_attributes,omitempty"` + CreatedAt int64 `json:"created_at,omitempty"` +} + +type EventAttributes struct { + EventId string `json:"event_id"` + ParentSessionId string `json:"parent_session_id,omitempty"` + SessionId string `json:"session_id,omitempty"` 
+ ScreenName string `json:"screen_name,omitempty"` + ScreenshotTime int64 `json:"screenshot_timestamp,omitempty"` + FragmentList []string `json:"fragment_list,omitempty"` + ModuleName string `json:"module_name,omitempty"` + EventName string `json:"event_name,omitempty"` + EventTimestamp int64 `json:"event_timestamp"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + EventType EventType `json:"event_type,omitempty"` + ZipName string `json:"zip_name,omitempty"` +} + +type SessionUploadEventAttributes struct { + BeginningDeviceAttributes DeviceAttributes `json:"beginning_device_attributes,omitempty"` + EndDeviceAttributes DeviceAttributes `json:"end_device_attributes,omitempty"` + EventId string `json:"event_id,omitempty"` +} + +type DeviceAndNetworkAttributes struct { + DeviceId string `json:"device_id"` + NetworkType string `json:"network_type"` + NetworkStrength float64 `json:"network_strength"` + DeviceAttributes DeviceAttributes `json:"device_attributes"` +} + +type DeviceAttributes struct { + Battery float64 `json:"battery"` + Cpu float64 `json:"cpu,omitempty"` + Storage float64 `json:"storage,omitempty"` + Memory float64 `json:"memory,omitempty"` +} diff --git a/alfred/model/ingester/base_model.go b/alfred/model/ingester/base_model.go new file mode 100644 index 0000000..9cd3539 --- /dev/null +++ b/alfred/model/ingester/base_model.go @@ -0,0 +1,46 @@ +package ingester + +type BaseAttributes struct { + AppVersionCode string `json:"app_version_code,omitempty"` + AppVersionName string `json:"app_version_name,omitempty"` + ClientTs int64 `json:"client_ts"` + DeviceId string `json:"device_id,omitempty"` + DeviceModel string `json:"device_model,omitempty"` + DeviceManufacturer string `json:"device_manufacturer,omitempty"` + ScreenResolution string `json:"screen_resolution,omitempty"` + AppOS string `json:"app_os,omitempty"` + OsVersion string `json:"os_version,omitempty"` + Latitude float32 `json:"latitude,omitempty"` + Longitude float32 
`json:"longitude,omitempty"` + NetworkType string `json:"network_type,omitempty"` + CustomerId string `json:"customer_id,omitempty"` + UpTime int64 `json:"up_time,omitempty"` + CarrierName string `json:"carrier_name,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + SessionTimeStamp int64 `json:"session_time_stamp,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + SessionId string `json:"session_id"` + ParentSessionId string `json:"parent_session_id,omitempty"` + TraceId string `json:"trace_id,omitempty"` + SnapshotPerSecond int64 `json:"snapshot_per_second,omitempty"` + PhoneNumber string `json:"phone_number,omitempty"` + EventEndTimeStamp int64 `json:"event_end_time_stamp,omitempty"` + HasErrors bool `json:"has_errors,omitempty"` + ImageType string `json:"image_type,omitempty"` + FileTypeExtension string `json:"file_type_extension,omitempty"` +} + +type WebBaseAttributes struct { + DeviceId string `json:"device_id,omitempty"` + UserEmail string `json:"user_email,omitempty"` + ProjectName string `json:"project_name,omitempty"` + BrowserType string `json:"browser_type,omitempty"` + DeviceOs string `json:"device_os,omitempty"` + DeviceType string `json:"device_type,omitempty"` + BrowserVersion string `json:"browser_version,omitempty"` + ScreenResolution string `json:"screen_resolution,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + ClientTimestamp int64 `json:"client_timestamp"` + SessionId string `json:"session_id"` + Version int64 `json:"version,omitempty"` +} diff --git a/alfred/model/ingester/fragment_model.go b/alfred/model/ingester/fragment_model.go new file mode 100644 index 0000000..62f7ce4 --- /dev/null +++ b/alfred/model/ingester/fragment_model.go @@ -0,0 +1,11 @@ +package ingester + +type FragmentModel struct { + FragmentAttributes FragmentAttributes `json:"fragment_attributes,omitempty"` +} + +type FragmentAttributes struct { + FragmentName string `json:"fragment_name,omitempty"` + 
ScreenName string `json:"screen_name,omitempty"` + Vertical string `json:"vertical,omitempty"` +} diff --git a/alfred/model/ingester/performance_metrics.go b/alfred/model/ingester/performance_metrics.go new file mode 100644 index 0000000..159627b --- /dev/null +++ b/alfred/model/ingester/performance_metrics.go @@ -0,0 +1,93 @@ +package ingester + +type MetricType string + +const ( + API_METRICS MetricType = "API_METRICS" + CRASH_METRICS = "CRASH_METRICS" + SCREEN_TRANSITION_METRICS = "SCREEN_TRANSITION_METRICS" + APP_METRICS = "APP_METRICS" +) + +type AppMetrics struct { + BaseAttributes BaseAttributes `json:"base_attributes,omitempty"` + MetricsAttributes []MetricsAttributes `json:"metrics_attributes,omitempty"` +} + +type MetricsAttributes struct { + EventId string `json:"event_id,omitempty"` + EventName string `json:"event_name,omitempty"` + EventTimestamp int64 `json:"event_timestamp,omitempty"` + SessionId string `json:"session_id,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + EventType MetricType `json:"event_type,omitempty"` +} + +type Metric struct{} + +type ApiMetric struct { + Metric + Url string `json:"url,omitempty"` + Method string `json:"method,omitempty"` + ResponseCode int16 `json:"response_code,omitempty"` + BytesSent int64 `json:"bytes_sent,omitempty"` + BytesReceived int64 `json:"bytes_received,omitempty"` + StartTime int64 `json:"start_time,omitempty"` + EndTime int64 `json:"end_time,omitempty"` + DurationInMs int32 `json:"duration_in_ms,omitempty"` + ErrorType string `json:"error_type,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + ScreenName string `json:"screen_name,omitempty"` + ModuleName string `json:"module_name,omitempty"` +} + +type CrashMetric struct { + Metric + Exceptions []ExceptionInfo `json:"exceptions,omitempty"` + Threads []ThreadInfo `json:"threads,omitempty"` +} + +type ScreenTransitionMetric struct { + Metric + CurrentScreenName *string 
`json:"current_screen_name,omitempty"` + PreviousScreenName *string `json:"previous_screen_name,omitempty"` + LoadTimeInMs *int32 `json:"load_time_in_ms,omitempty"` + LatencyInMs *int32 `json:"latency_in_ms,omitempty"` + ErrorType string `json:"error_type,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` +} + +type AppPerformanceMetric struct { + Metric + AppStartTime *int64 `json:"app_start_time,omitempty"` + AppDiskUsage *int64 `json:"app_disk_usage,omitempty"` + MemoryUsed *int64 `json:"memory_used,omitempty"` + MemoryWarningTimeStamps []int64 `json:"memory_warning_time_stamps,omitempty"` + CriticalCpuIntervals []Interval `json:"critical_cpu_intervals,omitempty"` + NetworkInterfaceIntervals []Interval `json:"network_interface_intervals,omitempty"` + AnrIntervals []AnrInterval `json:"anr_intervals,omitempty"` +} + +type AnrInterval struct { + StartTime int64 `json:"start_time,omitempty"` + EndTime int64 `json:"end_time,omitempty"` + Threads []ThreadInfo `json:"threads,omitempty"` +} + +type ExceptionInfo struct { + Name string `json:"name,omitempty"` + Message string `json:"message,omitempty"` + StackTrace []string `json:"stack_trace,omitempty"` +} + +type ThreadInfo struct { + Name string `json:"name,omitempty"` + Priority int32 `json:"priority,omitempty"` + StackTrace []string `json:"stack_trace,omitempty"` +} + +type Interval struct { + StartTime int64 `json:"start_time,omitempty"` + EndTime int64 `json:"end_time,omitempty"` + Value string `json:"value,omitempty"` +} diff --git a/alfred/model/ingester/web_model.go b/alfred/model/ingester/web_model.go new file mode 100644 index 0000000..0e521e0 --- /dev/null +++ b/alfred/model/ingester/web_model.go @@ -0,0 +1,15 @@ +package ingester + +type WebSessionUploadRequest struct { + BaseAttributes WebBaseAttributes `json:"base_attributes"` + SessionAttributes WebSessionAttributes `json:"session_attribute"` + CreatedAt int64 `json:"created_at"` +} + +type WebSessionAttributes struct { + EventId string 
`json:"event_id,omitempty"` + StartTimestamp int64 `json:"start_timestamp,omitempty"` + EndTimestamp int64 `json:"end_timestamp,omitempty"` + Data []string `json:"data,omitempty"` + DataCounter int64 `json:"data_counter,omitempty"` +} diff --git a/alfred/model/mapper/cruise_mapper.go b/alfred/model/mapper/cruise_mapper.go new file mode 100644 index 0000000..424813e --- /dev/null +++ b/alfred/model/mapper/cruise_mapper.go @@ -0,0 +1,61 @@ +package mapper + +type VideoQuality string +type VideoRecordingPolicy string +type OsType string + +const ( + Low VideoQuality = "Low" + Medium = "Medium" + High = "High" +) + +const ( + All VideoRecordingPolicy = "All" + Sampling = "Sampling" +) + +const ( + ANDROID OsType = "Android" + IOS OsType = "Ios" +) + +type CruiseMapper struct { + Enable *bool `json:"enable,omitempty"` + ConfigTime int64 `json:"config_time,omitempty"` + OsConfig OsConfig `json:"os_config,omitempty"` + Type OsType `json:"type,omitempty"` + MetricsConfig MetricsConfig `json:"metrics_config,omitempty"` + RecordingsConfig RecordingsConfig `json:"recordings_config,omitempty"` +} + +type MetricsConfig struct { + Enable *bool `json:"enable,omitempty"` + DisableAPIPerformance *bool `json:"disable_api_performance,omitempty"` + DisableRemoteLogging *bool `json:"disable_remote_logging,omitempty"` + DisableCPUMonitoring *bool `json:"disable_cpu_monitoring,omitempty"` + DisableMemoryMonitoring *bool `json:"disable_memory_monitoring,omitempty"` + DisableDeviceModels []string `json:"disable_device_models,omitempty"` + DisableApiResponse *bool `json:"disable_api_response,omitempty"` + DisableApiRequest *bool `json:"disable_api_request,omitempty"` + EnabledApiPaths []string `json:"enabled_api_paths,omitempty"` +} + +type RecordingsConfig struct { + Enable *bool `json:"enable,omitempty"` + VideoQuality VideoQuality `json:"video_quality,omitempty"` + VideoRecordingPolicy VideoRecordingPolicy `json:"video_recording_policy,omitempty"` + SnapshotPerSecond *int8 
`json:"snapshot_per_second,omitempty"` + DisableCrashRecording *bool `json:"disable_crash_recording,omitempty"` + DisableANRRecording *bool `json:"disable_anr_recording,omitempty"` + DisableScreens []string `json:"disable_screens,omitempty"` + DisableDeviceModels []string `json:"disable_device_models,omitempty"` + DisableModules []string `json:"disable_modules,omitempty"` + MaskedEnabledScreens []string `json:"masked_enabled_screens,omitempty"` +} + +type OsConfig struct { + AppVersionCode string `json:"app_version_code"` + AppVersion string `json:"app_version"` + OsVersion string `json:"os_version"` +} diff --git a/alfred/pkg/cache/cache.go b/alfred/pkg/cache/cache.go new file mode 100644 index 0000000..2f99425 --- /dev/null +++ b/alfred/pkg/cache/cache.go @@ -0,0 +1,26 @@ +package cache + +import ( + "alfred/pkg/log" + "alfred/utils" + "fmt" + "time" +) + +func (c *Client) PutWithTtl(key string, value interface{}, time time.Duration) { + c.cacheClient.Set(key, value, time) +} + +func (c *Client) PutWithDefaultTtl(key string, value interface{}) { + log.Info(fmt.Sprintf("set %s value corresponding to %s key in cache with default ttl", value, key)) + c.cacheClient.Set(key, value, utils.DefaultCacheTtl) +} + +func (c *Client) Get(key string) (interface{}, bool) { + //c.logger.Info(fmt.Sprintf("get value corresponding to %s from cache", key)) + return c.cacheClient.Get(key) +} + +func (c *Client) Delete(key string) { + c.cacheClient.Delete(key) +} diff --git a/alfred/pkg/cache/config.go b/alfred/pkg/cache/config.go new file mode 100644 index 0000000..4f51be1 --- /dev/null +++ b/alfred/pkg/cache/config.go @@ -0,0 +1,29 @@ +package cache + +import ( + "alfred/utils" + "time" + + "github.com/patrickmn/go-cache" +) + +type ConfigClientInterface interface { + PutWithTtl(key string, value interface{}, time time.Duration) + PutWithDefaultTtl(key string, value interface{}) + Get(key string) (interface{}, bool) + Delete(key string) +} + +type Client struct { + cacheClient 
*cache.Cache +} + +func NewCacheConfig() *Client { + + return &Client{ + cacheClient: cache.New( + 5*time.Minute, + utils.DefaultCacheTtl, + ), + } +} diff --git a/alfred/pkg/es7/config.go b/alfred/pkg/es7/config.go new file mode 100644 index 0000000..161c079 --- /dev/null +++ b/alfred/pkg/es7/config.go @@ -0,0 +1,142 @@ +package es7 + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/pkg/log" + "context" + "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "net" + "net/http" + "os" + "strings" + "time" +) + +type ElasticSearchClient interface { + GetESClient() *elasticsearch.Client + FetchESData(index string, content string) (*esapi.Response, error) + UpdateESData(indexList []string, content string) (*esapi.Response, error) + FetchESDataMultipleIndex(indexList []string, content string) (*esapi.Response, error) + UpdateESDataBulk(content string, indexList []string) (*esapi.Response, error) + FetchESDataWithDocId(index string, docId string) (*esapi.Response, error) +} + +type ElasticsearchClientImpl struct { + esClient *elasticsearch.Client +} + +func NewEsConfig(elasticsearchBaseConfig config.ElasticSearchBaseConfig) *ElasticsearchClientImpl { + cfg := elasticsearch.Config{ + Addresses: elasticsearchBaseConfig.Nodes, + Username: elasticsearchBaseConfig.Username, + Password: elasticsearchBaseConfig.Password, + Transport: &http.Transport{ + MaxIdleConnsPerHost: elasticsearchBaseConfig.MaxIdleConnection, + ResponseHeaderTimeout: 2 * time.Minute, + DialContext: (&net.Dialer{Timeout: 1 * time.Minute}).DialContext, + MaxConnsPerHost: elasticsearchBaseConfig.MaxConnection, + IdleConnTimeout: time.Duration(elasticsearchBaseConfig.IdleConnectionTimeoutInSeconds) * time.Second, + }, + } + es, err := elasticsearch.NewClient(cfg) + if err != nil { + log.Error("elasticsearch connection failed", zap.Error(err)) + os.Exit(1) + } + log.Info("elasticsearch connected ...") + + return 
&ElasticsearchClientImpl{ + esClient: es, + } +} + +func (esClient *ElasticsearchClientImpl) GetESClient() *elasticsearch.Client { + return esClient.esClient +} + +func (esClient *ElasticsearchClientImpl) FetchESData(index string, content string) (*esapi.Response, error) { + search, err := esClient.esClient.Search(esClient.esClient.Search.WithContext(context.Background()), + esClient.esClient.Search.WithIndex(index+"*"), + esClient.esClient.Search.WithBody(strings.NewReader(content)), + esClient.esClient.Search.WithTrackTotalHits(true)) + if err != nil { + metrics.ElasticSearchFetchFailureCounter.WithLabelValues(index).Inc() + log.Error("elasticsearch search query failed", zap.Error(err)) + return nil, err + } + metrics.ElasticSearchFetchSuccessCounter.WithLabelValues(index).Inc() + return search, nil +} + +func (esClient *ElasticsearchClientImpl) FetchESDataMultipleIndex(indexList []string, content string) (*esapi.Response, error) { + search, err := esClient.esClient.Search(esClient.esClient.Search.WithContext(context.Background()), + esClient.esClient.Search.WithIndex(indexList...), + esClient.esClient.Search.WithBody(strings.NewReader(content)), + esClient.esClient.Search.WithTrackTotalHits(true)) + if err != nil { + for _, index := range indexList { + metrics.ElasticSearchFetchFailureCounter.WithLabelValues(index).Inc() + } + log.Error("elasticsearch search query failed", zap.Error(err)) + return nil, err + } + for _, index := range indexList { + metrics.ElasticSearchFetchSuccessCounter.WithLabelValues(index).Inc() + } + return search, nil +} + +func (esClient *ElasticsearchClientImpl) UpdateESData(indexList []string, content string) (*esapi.Response, error) { + update, err := esClient.esClient.UpdateByQuery( + indexList, + esClient.esClient.UpdateByQuery.WithBody(strings.NewReader(content)), + esClient.esClient.UpdateByQuery.WithContext(context.Background()), + ) + if err != nil { + for _, index := range indexList { + 
metrics.ElasticSearchUpdateFailureCounter.WithLabelValues(index).Inc() + } + log.Error("elasticsearch update query failed", zap.Error(err)) + return nil, err + } + for _, index := range indexList { + metrics.ElasticSearchUpdateSuccessCounter.WithLabelValues(index).Inc() + } + return update, nil +} + +func (esClient *ElasticsearchClientImpl) UpdateESDataBulk(content string, indexList []string) (*esapi.Response, error) { + bulkUpdateResponse, err := esClient.esClient.Bulk( + strings.NewReader(content), + esClient.esClient.Bulk.WithContext(context.Background()), + ) + if err != nil { + for _, index := range indexList { + metrics.ElasticSearchDeleteFailureCounter.WithLabelValues(index).Inc() + } + log.Error("elasticsearch bulk update query failed", zap.Error(err), zap.String("indexList", strings.Join(indexList, ","))) + return nil, err + } + for _, index := range indexList { + metrics.ElasticSearchDeleteSuccessCounter.WithLabelValues(index).Inc() + } + return bulkUpdateResponse, nil +} + +func (esClient *ElasticsearchClientImpl) FetchESDataWithDocId(index string, docId string) (*esapi.Response, error) { + response, err := esClient.esClient.Get( + index, + docId, + esClient.esClient.Get.WithContext(context.Background()), + ) + if err != nil { + metrics.ElasticSearchFetchFailureCounter.WithLabelValues(index).Inc() + log.Error("elasticsearch Fetch query failed", zap.Error(err)) + return nil, err + } + metrics.ElasticSearchFetchSuccessCounter.WithLabelValues(index).Inc() + return response, nil +} diff --git a/alfred/pkg/ffmpeg/media.go b/alfred/pkg/ffmpeg/media.go new file mode 100644 index 0000000..d9ce842 --- /dev/null +++ b/alfred/pkg/ffmpeg/media.go @@ -0,0 +1,252 @@ +package ffmpeg + +import ( + "alfred/config" + "alfred/pkg/log" + "alfred/utils" + "encoding/json" + "errors" + "fmt" + "github.com/u2takey/ffmpeg-go" + "go.uber.org/zap" + "golang.org/x/image/font" + "golang.org/x/image/font/basicfont" + "golang.org/x/image/math/fixed" + "golang.org/x/image/webp" + 
"image" + "image/color" + "image/draw" + "image/jpeg" + "os" + "os/exec" + "path/filepath" + "strconv" +) + +func GenerateVideoFromImages(folderPath, filename, imageType string, snapshotPerSecond int64) (string, error) { + + err := ffmpeg_go. + Input( + filepath.Join(folderPath, utils.ASTERISK+imageType), + ffmpeg_go.KwArgs{"framerate": snapshotPerSecond}, + ffmpeg_go.KwArgs{"pattern_type": "glob"}, + ). + Filter("scale", ffmpeg_go.Args{"trunc(iw/2)*2:trunc(ih/2)*2"}). + Output( + filepath.Join(folderPath, filename+utils.VideoExtension.String()), + ffmpeg_go.KwArgs{"codec": "libx264"}, + ffmpeg_go.KwArgs{"crf": 20}, + ffmpeg_go.KwArgs{"pix_fmt": "yuv420p"}, + ffmpeg_go.KwArgs{"preset": "ultrafast"}, + ). + OverWriteOutput().Run() + if err != nil { + return utils.EMPTY, err + } + return filepath.Join(folderPath, filename, filename+utils.VideoExtension.String()), nil +} + +func ApplyBlurToRegion(inputImage, destFile string, height, width int, blurRatio, blurStrength string) error { + + ratio, err := strconv.ParseFloat(blurRatio, 64) + if err != nil { + return err + } + + cropHeight := int(float64(height) / ratio) + x := 0 + y := cropHeight + cropWidth := width + + inputVideo := ffmpeg_go.Input(inputImage) + croppedStream := inputVideo.Filter("crop", ffmpeg_go.Args{fmt.Sprintf("%d:%d:%d:%d", cropWidth, cropHeight, x, height-y)}) + finalStream := croppedStream.Filter("avgblur", ffmpeg_go.Args{blurStrength}) + err = ffmpeg_go.Input(inputImage).Overlay(finalStream, fmt.Sprintf("%d:%d", x, height-y)).Output(destFile).OverWriteOutput().Run() + + if err != nil { + return err + } + return nil +} + +func ApplyTouchPoints(inputImageFile, outputImageFile string, x, y int, tempDirPath, imageType string) error { + + imgFile, err := os.Open(inputImageFile) + if err != nil { + log.Error("Error while opening image", zap.String("inputImage", inputImageFile), zap.Error(err)) + return err + } + defer imgFile.Close() + + var img image.Image + switch imageType { + case 
utils.ImageExtensionWebp.String(): + img, err = webp.Decode(imgFile) + case utils.ImageExtensionJpeg.String(): + img, _, err = image.Decode(imgFile) + } + + if err != nil { + log.Error("Error while decoding image", zap.String("inputImage", inputImageFile), zap.String("image_type", imageType), zap.Error(err)) + return err + } + + // Create a new RGBA image to draw on. + rgba := image.NewRGBA(img.Bounds()) + + draw.Draw(rgba, img.Bounds(), img, image.Point{}, draw.Src) + + circleRadius := config.GetCoreConfig().TouchPointsConfig.TouchPointCircleRadius + circleColor := color.RGBA{R: uint8(config.GetCoreConfig().TouchPointsConfig.TouchPointRedComponent), G: uint8(config.GetCoreConfig().TouchPointsConfig.TouchPointGreenComponent), B: uint8(config.GetCoreConfig().TouchPointsConfig.TouchPointBlueComponent), A: uint8(config.GetCoreConfig().TouchPointsConfig.TouchPointAlphaComponent)} + + for px := x - circleRadius; px <= x+circleRadius; px++ { + for py := y - circleRadius; py <= y+circleRadius; py++ { + if (px-x)*(px-x)+(py-y)*(py-y) <= circleRadius*circleRadius { + rgba.Set(px, py, circleColor) + } + } + } + + err = saveImage(rgba, outputImageFile, tempDirPath) + if err != nil { + return err + } + + return nil +} + +func GenerateThirdPartyImage(imageDest, screen, pathToUnzippedFiles string, height, width int, imageType string) error { + + tempDir := utils.ThirdPartyDirectory + tempDirPath := filepath.Join(pathToUnzippedFiles, tempDir) + validFileName := utils.ConvertToValidFilename(screen) + tempFile := filepath.Join(tempDirPath, validFileName+imageType) + + if utils.FolderExists(tempDirPath) && utils.FolderExists(tempFile) { + + err := utils.CopyFile(tempFile, imageDest) + if err != nil { + return err + } + return nil + } + + if !utils.FolderExists(tempDirPath) { + _ = utils.CreateDirectory(tempDirPath) + } + + // Create a new white image + img := image.NewRGBA(image.Rect(0, 0, width, height)) + draw.Draw(img, img.Bounds(), &image.Uniform{C: color.White}, image.Point{}, 
draw.Src) + + // Use basicfont + face := basicfont.Face7x13 + + textWidth := font.MeasureString(face, "Third Party Screen "+screen).Ceil() + + // Calculate text position at the center + x := 1 + + if textWidth > width { + x += (textWidth - width) / 2 + } else { + x += (width - textWidth) / 2 + } + + y := (height) / 2 + + // Draw the text on the image + drawer := &font.Drawer{ + Dst: img, + Src: image.Black, + Face: face, + Dot: fixed.P(x, y), + } + drawer.DrawString("Third Party Screen " + screen) + + // Save the image to a file (overwriting if it already exists) + err := saveImage(img, tempFile, tempDirPath) + if err != nil { + return err + } + err = utils.CopyFile(tempFile, imageDest) + if err != nil { + return err + } + + return nil +} + +func ExtractImageDimensions(inputImage string) (int, int, error) { + inputInfo, err := ffmpeg_go.Probe(inputImage) + if err != nil { + return 0, 0, err + } + + var probeInfo struct { + Streams []struct { + Width int `json:"width"` + Height int `json:"height"` + } `json:"streams"` + } + + if err := json.Unmarshal([]byte(inputInfo), &probeInfo); err != nil { + log.Error("Error unmarshaling JSON data: %v", zap.Error(err)) + } + if len(probeInfo.Streams) <= 0 { + return 0, 0, err + } + + width := probeInfo.Streams[0].Width + height := probeInfo.Streams[0].Height + + return height, width, nil +} + +func saveImage(img *image.RGBA, filename, tempDirPath string) error { + outputFile, err := os.Create(filename) + if err != nil { + log.Error("Error while creating output image file", zap.String("inputImage", filename), zap.String("outputImage", filename), zap.Error(err)) + return err + } + defer outputFile.Close() + + if filepath.Ext(filename) == utils.ImageExtensionJpeg.String() { + err = jpeg.Encode(outputFile, img, nil) + } else if filepath.Ext(filename) == utils.ImageExtensionWebp.String() { + err = encodeToWebp(img, filename, tempDirPath) + } else { + log.Error("Unsupported image format", zap.String("inputImage", filename), 
zap.String("outputImage", filename), zap.Error(err)) + return errors.New("unsupported image format") + } + // Encode the modified image to a JPEG file. + if err != nil { + log.Error("Error while encoding image", zap.String("inputImage", filename), zap.String("outputImage", filename), zap.Error(err)) + return err + } + + return nil +} + +func encodeToWebp(img image.Image, filename, tempDirPath string) error { + tmpFile := filepath.Join(tempDirPath, "tempImage.jpeg") + outFile, err := os.Create(tmpFile) + if err != nil { + return err + } + defer outFile.Close() + + err = jpeg.Encode(outFile, img, nil) + if err != nil { + return err + } + + cmd := exec.Command("cwebp", tmpFile, "-o", filename) + err = cmd.Run() + if err != nil { + return err + } + + os.Remove(tmpFile) + return nil +} diff --git a/alfred/pkg/kafka/.gitignore b/alfred/pkg/kafka/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/pkg/kafka/config.go b/alfred/pkg/kafka/config.go new file mode 100644 index 0000000..f424ff0 --- /dev/null +++ b/alfred/pkg/kafka/config.go @@ -0,0 +1,80 @@ +package kafka + +import ( + "alfred/config" + "crypto/tls" + "github.com/Shopify/sarama" + "time" +) + +func SaramaSyncProducer(env string, baseConfig config.KafkaBaseConfig) (sarama.AsyncProducer, error) { + return sarama.NewAsyncProducer(baseConfig.Brokers, kafkaProducerConfig(env, baseConfig)) +} + +func SaramaKafkaConsumer(env string, baseConfig config.KafkaBaseConfig, groupID string) (sarama.ConsumerGroup, error) { + consumerGroup, err := sarama.NewConsumerGroup(baseConfig.Brokers, groupID, kafkaConsumerConfig(env, baseConfig, groupID)) + if err != nil { + return nil, err + } + + return consumerGroup, nil +} + +func kafkaProducerConfig(env string, baseConfig config.KafkaBaseConfig) *sarama.Config { + kafkaConfig := kafkaConfiguration(env, baseConfig) + + kafkaConfig.Producer.Retry.Max = 3 + kafkaConfig.Producer.RequiredAcks = sarama.WaitForLocal + kafkaConfig.Producer.Compression = 
sarama.CompressionSnappy + kafkaConfig.Producer.Return.Successes = true + kafkaConfig.Producer.Flush.Bytes = 31000 + kafkaConfig.Producer.Flush.Frequency = 100 * time.Millisecond + kafkaConfig.Producer.Flush.Messages = 100 + kafkaConfig.ChannelBufferSize = 512 + kafkaConfig.Producer.MaxMessageBytes = 2000000 + + kafkaConfig.Metadata.RefreshFrequency = 1 * time.Minute + + return kafkaConfig +} + +func kafkaConsumerConfig(env string, baseConfig config.KafkaBaseConfig, groupId string) *sarama.Config { + kConfig := kafkaConfiguration(env, baseConfig) + + kConfig.Version = sarama.V3_3_1_0 + kConfig.Consumer.Offsets.Initial = sarama.OffsetNewest + kConfig.Consumer.MaxProcessingTime = 50 * time.Millisecond + kConfig.Consumer.Return.Errors = true + kConfig.ClientID = groupId + kConfig.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + kConfig.Consumer.Fetch.Max = 1024 * 1024 + kConfig.Consumer.Fetch.Min = 1024 + kConfig.Consumer.Fetch.Default = 32768 + + return kConfig +} + +func kafkaConfiguration(env string, baseConfig config.KafkaBaseConfig) *sarama.Config { + kafkaConfig := sarama.NewConfig() + + if env == "local" { + return kafkaConfig + } else if env == "prod" { + kafkaConfig.Net.SASL.Mechanism = sarama.SASLTypePlaintext + } else { + kafkaConfig.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + kafkaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA512} + } + } + + kafkaConfig.Net.SASL.User = baseConfig.Username + kafkaConfig.Net.SASL.Password = baseConfig.Password + kafkaConfig.Net.SASL.Enable = baseConfig.SaslEnabled + kafkaConfig.Net.TLS.Enable = baseConfig.TlsEnabled + kafkaConfig.Net.TLS.Config = &tls.Config{ + InsecureSkipVerify: baseConfig.TlsInsureSkipVerification, + } + + return kafkaConfig +} diff --git a/alfred/pkg/kafka/consume/consume.go b/alfred/pkg/kafka/consume/consume.go new file mode 100644 index 0000000..e4ddc06 --- /dev/null +++ 
b/alfred/pkg/kafka/consume/consume.go @@ -0,0 +1,4 @@ +package consume + +type KConsumer struct { +} diff --git a/alfred/pkg/kafka/produce/produce.go b/alfred/pkg/kafka/produce/produce.go new file mode 100644 index 0000000..eda1088 --- /dev/null +++ b/alfred/pkg/kafka/produce/produce.go @@ -0,0 +1,95 @@ +package kafka + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/pkg/kafka" + "alfred/pkg/log" + "alfred/utils" + "encoding/json" + "os" + "time" + + "github.com/Shopify/sarama" + "go.uber.org/zap" +) + +type KProducer interface { + Errors() + Successes() + SendMessage(mappedRequest interface{}, topic, key, clientName string) error +} + +type KProducerImpl struct { + AsyncProducer sarama.AsyncProducer +} + +func NewKProducer(env string, baseConfig config.KafkaBaseConfig) *KProducerImpl { + + producer, err := kafka.SaramaSyncProducer(env, baseConfig) + if err != nil { + log.Error("sarama kafka producer failed", zap.Error(err)) + os.Exit(1) + } + + kProducer := &KProducerImpl{ + AsyncProducer: producer, + } + + go func() { + kProducer.Errors() + }() + go func() { + kProducer.Successes() + }() + return kProducer +} + +// Errors keep the track of failed messages. 
+func (kp *KProducerImpl) Errors() { + for err := range kp.AsyncProducer.Errors() { + _, errEncode := err.Msg.Key.Encode() + if errEncode != nil { + log.Error("key encoding failed for failed message", zap.String("topic", err.Msg.Topic)) + } + + metrics.KafkaEventIngestionEventFailureCounter.WithLabelValues(err.Msg.Topic).Inc() + log.Error("failed to emit event to kafka", zap.String("topic", err.Msg.Topic), zap.String("clientName", string(err.Msg.Headers[0].Value)), zap.Error(err)) + } +} + +// Successes is to check if message successfully delivered to kafka +func (kp *KProducerImpl) Successes() { + for msg := range kp.AsyncProducer.Successes() { + _, errEncode := msg.Key.Encode() + if errEncode != nil { + log.Error("key encoding failed for failed message", zap.String("topic", msg.Topic)) + } + + metrics.KafkaEventIngestionEventSuccessCounter.WithLabelValues(msg.Topic).Inc() + } +} + +func (kp *KProducerImpl) SendMessage(mappedRequest interface{}, topic, key, clientName string) error { + startTime := time.Now() + messagePayload, err := json.Marshal(mappedRequest) + if err != nil { + return err + } + headers := []sarama.RecordHeader{ + { + Key: []byte(sarama.StringEncoder(utils.CLIENT_NAME)), + Value: []byte(sarama.StringEncoder(clientName)), + }, + } + message := &sarama.ProducerMessage{ + Topic: topic, + Key: sarama.StringEncoder(key), + Value: sarama.StringEncoder(messagePayload), + Headers: headers, + } + kp.AsyncProducer.Input() <- message + endTime := float64(time.Since(startTime)) + metrics.AlfredKafkaProduceLatencyHistogram.WithLabelValues(topic, clientName).Observe(endTime) + return nil +} diff --git a/alfred/pkg/kafka/scram_client.go b/alfred/pkg/kafka/scram_client.go new file mode 100644 index 0000000..59e91b7 --- /dev/null +++ b/alfred/pkg/kafka/scram_client.go @@ -0,0 +1,35 @@ +package kafka + +import ( + "crypto/sha512" + + "github.com/xdg-go/scram" +) + +var ( + SHA512 scram.HashGeneratorFcn = sha512.New +) + +type XDGSCRAMClient struct { + 
*scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/alfred/pkg/limiter/limiter.go b/alfred/pkg/limiter/limiter.go new file mode 100644 index 0000000..85d4702 --- /dev/null +++ b/alfred/pkg/limiter/limiter.go @@ -0,0 +1,43 @@ +package limiter + +import ( + "alfred/config" + "context" + "golang.org/x/sync/semaphore" + "time" +) + +var ( + VideoSem = initVideoSemaphore() + DefaultTimeout = initDefaultTimeout() +) + +func TryAcquire(ctx context.Context, sem *semaphore.Weighted, timeout time.Duration) bool { + ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + err := sem.Acquire(ctxWithTimeout, 1) + return err == nil +} + +func Release(sem *semaphore.Weighted) { + if sem != nil { + sem.Release(1) + } +} + +func initVideoSemaphore() *semaphore.Weighted { + maxJobs := config.GetCoreConfig().VideoProcessingConfig.MaxConcurrentJobs + if maxJobs <= 0 { + maxJobs = 3 + } + return semaphore.NewWeighted(int64(maxJobs)) +} + +func initDefaultTimeout() time.Duration { + timeoutSeconds := config.GetCoreConfig().VideoProcessingConfig.DefaultTimeoutSeconds + if timeoutSeconds <= 0 { + timeoutSeconds = 5 + } + return time.Duration(timeoutSeconds) * time.Second +} diff --git a/alfred/pkg/log/log.go b/alfred/pkg/log/log.go new file mode 100644 index 0000000..d68345c --- /dev/null +++ b/alfred/pkg/log/log.go @@ -0,0 +1,54 @@ +package log + +import ( + "go.elastic.co/ecszap" + "go.uber.org/zap" +) + +type Logger struct { + log *zap.Logger +} + 
+var Log *Logger + +func InitLogger(serviceName string) { + logConfig := zap.NewProductionConfig() + logConfig.EncoderConfig = ecszap.ECSCompatibleEncoderConfig(logConfig.EncoderConfig) + log, err := logConfig.Build(ecszap.WrapCoreOption(), zap.AddCallerSkip(1)) + log = log.With(zap.String("service.name", serviceName)) + if err != nil { + panic(err) + } + + Log = &Logger{ + log: log, + } +} + +func GetLogger() *zap.Logger { + return Log.log +} + +func Error(message string, fields ...zap.Field) { + Log.log.Error(message, fields...) +} + +func Warn(message string, fields ...zap.Field) { + Log.log.Warn(message, fields...) +} + +func Info(message string, fields ...zap.Field) { + Log.log.Info(message, fields...) +} + +func Debug(message string, fields ...zap.Field) { + Log.log.Debug(message, fields...) +} + +func Fatal(message string, fields ...zap.Field) { + Log.log.Fatal(message, fields...) +} + +func Panic(message string, fields ...zap.Field) { + Log.log.Panic(message, fields...) +} diff --git a/alfred/pkg/s3/.gitignore b/alfred/pkg/s3/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/alfred/pkg/s3/config.go b/alfred/pkg/s3/config.go new file mode 100644 index 0000000..00fbf09 --- /dev/null +++ b/alfred/pkg/s3/config.go @@ -0,0 +1,40 @@ +package s3 + +import ( + "alfred/pkg/log" + "context" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + "go.uber.org/zap" + "os" +) + +type S3Client interface { + PresignedDownloadUrl(bucket, key, filename string) (string, error) + UploadFile(bucketName, pathToFile, fileName, targetFileName string) (*s3.PutObjectOutput, error) + DownloadAndUnzipFile(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName string) (string, error) + DownloadFile(bucketName, targetFolder, inputFileName, targetFileName string) error + CheckIfPresent(bucketName, fileName string) (bool, error) + PreSignedUploadUrl(bucket, fileName, fileExtension, contentType string) (string, error) + 
DownloadAndUnzipFileWithExtension(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName string, fileTypeExtension string) (string, error) + DownloadAllFilesFromFolder(bucketName, folderName, targetFolder string) ([]string, error) +} + +type S3ClientImpl struct { + s3Client *s3.Client + s3PresignedClient *s3.PresignClient +} + +func NewS3Client() *S3ClientImpl { + cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("ap-south-1")) + if err != nil { + log.Error("s3 config load failed", zap.Error(err)) + os.Exit(1) + } + awsS3Client := s3.NewFromConfig(cfg) + preSignClient := s3.NewPresignClient(awsS3Client) + return &S3ClientImpl{ + s3Client: awsS3Client, + s3PresignedClient: preSignClient, + } +} diff --git a/alfred/pkg/s3/constants.go b/alfred/pkg/s3/constants.go new file mode 100644 index 0000000..3ef2729 --- /dev/null +++ b/alfred/pkg/s3/constants.go @@ -0,0 +1,6 @@ +package s3 + +const ( + numOfWorkers = 5 + workerBuffer = 100 +) diff --git a/alfred/pkg/s3/file_utils.go b/alfred/pkg/s3/file_utils.go new file mode 100644 index 0000000..0fe14ac --- /dev/null +++ b/alfred/pkg/s3/file_utils.go @@ -0,0 +1,133 @@ +package s3 + +import ( + "alfred/utils" + "archive/zip" + "bytes" + "github.com/ulikunitz/xz" + "io" + "os" + "path/filepath" + "strings" + "sync" +) + +func UnzipFile(folderPath, sourceFileName, targetFilename, fileExtension string) (string, error) { + dest := filepath.Join(folderPath, targetFilename) + sourcePath := filepath.Join(folderPath, sourceFileName) + + // Step 1: Check if the file is a `.zip.xz` + if fileExtension == utils.ZipXzExtension.String() { + // Open the .zip.xz file + sourceFile, err := os.Open(sourcePath) + if err != nil { + return "", err + } + defer sourceFile.Close() + + // Step 2: Decompress the .xz file + xzReader, err := xz.NewReader(sourceFile) + if err != nil { + return "", err + } + + // Step 3: Read decompressed data into a buffer + var buf bytes.Buffer + _, err = io.Copy(&buf, xzReader) + 
if err != nil { + return "", err + } + + // Step 4: Use the buffer to create a zip.Reader + read, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + return "", err + } + + // Step 5: Process the decompressed `.zip` file + return processZipFiles(read, dest) + } else { + // For regular .zip files + read, err := zip.OpenReader(sourcePath) + if err != nil { + return "", err + } + defer read.Close() + + // Process the `.zip` file + return processZipFiles(&read.Reader, dest) + } +} + +// Helper function to process zip files +func processZipFiles(read *zip.Reader, dest string) (string, error) { + err := os.MkdirAll(dest, 0700) + if err != nil { + return "", err + } + + job := make(chan *zip.File, workerBuffer) + var wg = sync.WaitGroup{} + + for w := 0; w <= numOfWorkers; w++ { + go worker(&wg, job, dest, writeZipFilesToDisk) + } + //initiate worker pool + + for _, f := range read.File { + job <- f + wg.Add(1) + } + + wg.Wait() + close(job) + + return dest, nil +} + +type workerContext struct { + file *zip.File + dest string +} + +type workerFunc func(*workerContext) + +func worker(wg *sync.WaitGroup, jobs <-chan *zip.File, dest string, workerFunc workerFunc) { + for f := range jobs { + workerFunc(&workerContext{f, dest}) + wg.Done() + } +} + +func writeZipFilesToDisk(c *workerContext) { + dest := c.dest + file := c.file + rc, err := file.Open() + if err != nil { + return + } + defer rc.Close() + + // Check for ZipSlip (Directory traversal) + if !strings.Contains(filepath.Join(dest, file.Name), filepath.Clean(dest)+string(os.PathSeparator)) || file.FileInfo().IsDir() { + return + } + + destFile, err := os.OpenFile(filepath.Join(dest, file.Name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) + if err != nil { + return + } + defer destFile.Close() + _, err = io.Copy(destFile, rc) + if err != nil { + return + } +} + +func CreateFileFromByte(filename, fileExtension string, data []byte) error { + return 
os.WriteFile(filename+fileExtension, data, 0600) +} + +func ReadFile(filename, fileExtension string) ([]byte, error) { + return os.ReadFile(filename + fileExtension) +} diff --git a/alfred/pkg/s3/s3_operations.go b/alfred/pkg/s3/s3_operations.go new file mode 100644 index 0000000..8bd3538 --- /dev/null +++ b/alfred/pkg/s3/s3_operations.go @@ -0,0 +1,184 @@ +package s3 + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/pkg/log" + "alfred/utils" + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/service/s3" + "go.uber.org/zap" + "io" + "net/http" + "os" + "path/filepath" + "time" +) + +func (s3c *S3ClientImpl) PresignedDownloadUrl(bucket, key, filename string) (string, error) { + expiration := time.Now().Add(time.Hour * 12) + disposition := fmt.Sprintf("attachment; filename=\"%v\"", filename) + + res, err := s3c.s3PresignedClient.PresignGetObject(context.Background(), &s3.GetObjectInput{ + Bucket: &bucket, + Key: &key, + ResponseContentDisposition: &disposition, + ResponseExpires: &expiration, + }) + if err != nil { + metrics.S3OperationFailureCounter.WithLabelValues(bucket, "PresignGetObject").Inc() + return "", err + } + metrics.S3OperationSuccessCounter.WithLabelValues(bucket, "PresignGetObject").Inc() + return res.URL, nil + +} + +func (s3c *S3ClientImpl) UploadFile(bucketName, pathToFile, fileName, targetFileName string) (*s3.PutObjectOutput, error) { + file, err := os.Open(filepath.Join(pathToFile, fileName)) + if err != nil { + return nil, err + } + defer file.Close() + uploadResponse, err := s3c.s3Client.PutObject(context.TODO(), &s3.PutObjectInput{ + Bucket: aws.String(bucketName), + Key: aws.String(targetFileName), + Body: file, + }) + if err != nil { + metrics.S3OperationFailureCounter.WithLabelValues(bucketName, "PutObject").Inc() + return nil, err + } + metrics.S3OperationSuccessCounter.WithLabelValues(bucketName, 
"PutObject").Inc()
+
+	return uploadResponse, err
+}
+
+// DownloadAndUnzipFile will download the zip file from the given bucket and
+// unzip it to the target folder with the file name provided. It delegates to
+// the extension-aware variant with the plain .zip extension (the two bodies
+// were previously duplicated verbatim).
+func (s3c *S3ClientImpl) DownloadAndUnzipFile(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName string) (string, error) {
+	return s3c.DownloadAndUnzipFileWithExtension(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName, utils.ZipExtension.String())
+}
+
+// DownloadAndUnzipFileWithExtension downloads an archive of the given
+// extension from S3 and unzips it, returning the path to the unzipped files.
+func (s3c *S3ClientImpl) DownloadAndUnzipFileWithExtension(bucketName, targetFolder, inputFileName, targetFileName, unzippedFiledName string, fileTypeExtension string) (string, error) {
+	err := s3c.DownloadFile(bucketName, targetFolder, inputFileName, targetFileName)
+	if err != nil {
+		return utils.EMPTY, err
+	}
+	pathToFiles, err := UnzipFile(utils.TempDestinationFolder, targetFileName, unzippedFiledName, fileTypeExtension)
+	if err != nil {
+		// .Inc() was missing here and below: the counters were looked up but
+		// never actually incremented.
+		metrics.MediaGenerationFailureCounter.WithLabelValues(fileTypeExtension).Inc()
+		log.Error("Error while unzipping file", zap.String("bucketName", bucketName), zap.String("inputFileName", inputFileName), zap.Error(err))
+		return utils.EMPTY, err
+	}
+	metrics.MediaGenerationSuccessCounter.WithLabelValues(fileTypeExtension).Inc()
+	return pathToFiles, nil
+}
+
+// DownloadFile streams an S3 object to targetFolder/targetFileName.
+func (s3c *S3ClientImpl) DownloadFile(bucketName, targetFolder, inputFileName, targetFileName string) error {
+	result, err := s3c.s3Client.GetObject(context.TODO(), &s3.GetObjectInput{
+		Bucket: aws.String(bucketName),
+		Key:    aws.String(inputFileName),
+	})
+	if err != nil {
+		metrics.S3OperationFailureCounter.WithLabelValues(bucketName, "GetObject").Inc()
+		log.Error("Error while downloading file", zap.String("bucketName", bucketName), zap.String("inputFileName", inputFileName), zap.Error(err))
+		return err
+	}
+	metrics.S3OperationSuccessCounter.WithLabelValues(bucketName, "GetObject").Inc()
+
+	defer result.Body.Close()
+	finalTargetFilePath := filepath.Join(targetFolder, targetFileName)
+	file, err := os.Create(finalTargetFilePath)
+	if err != nil {
+		log.Error("Error while creating file", zap.String("finalTargetFilePath", finalTargetFilePath), zap.Error(err))
+		return err
+	}
+	defer file.Close()
+	// Stream the body straight to disk. io.Copy keeps memory flat and, unlike
+	// the previous ReadAll+Write, a read error is returned to the caller
+	// instead of being logged and then writing a truncated body.
+	if _, err = io.Copy(file, result.Body); err != nil {
+		log.Error("Error while reading file", zap.String("finalTargetFilePath", finalTargetFilePath), zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+// CheckIfPresent reports whether the object exists; a 404 is not an error.
+func (s3c *S3ClientImpl) CheckIfPresent(bucketName, fileName string) (bool, error) {
+	_, err := s3c.s3Client.HeadObject(context.TODO(), &s3.HeadObjectInput{
+		Bucket: aws.String(bucketName),
+		Key:    aws.String(fileName),
+	})
+	if err != nil {
+		var responseError *awshttp.ResponseError
+		if errors.As(err, &responseError) && responseError.ResponseError.HTTPStatusCode() == http.StatusNotFound {
+			return false, nil
+		}
+		metrics.S3OperationFailureCounter.WithLabelValues(bucketName, "HeadObject").Inc()
+		return false, err
+	}
+	metrics.S3OperationSuccessCounter.WithLabelValues(bucketName, "HeadObject").Inc()
+
+	return true, nil
+}
+
+// PreSignedUploadUrl returns a presigned PUT URL valid for two hours.
+func (s3c *S3ClientImpl) PreSignedUploadUrl(bucket, fileName, fileExtension, contentType string) (string, error) {
+	expiration := time.Hour * 2
+	_, err := utils.ValidateFileExtension(fileName, fileExtension)
+	if err != nil {
+		return "", err
+	}
+	putObjectArgs := s3.PutObjectInput{
+		Bucket:      &bucket,
+		Key:         &fileName,
+		ContentType: &contentType,
+	}
+	res, err := s3c.s3PresignedClient.PresignPutObject(
+		
context.Background(), + &putObjectArgs, + s3.WithPresignExpires(expiration)) + if err != nil { + metrics.S3OperationFailureCounter.WithLabelValues(config.GetIngesterConfig().S3Config.SessionUploadBucket, "PresignPutObject").Inc() + return "", err + } + metrics.S3OperationSuccessCounter.WithLabelValues(bucket, "PresignPutObject").Inc() + + return res.URL, nil +} + +func (s3c *S3ClientImpl) DownloadAllFilesFromFolder(bucketName, folderName, targetFolder string) ([]string, error) { + listObjectsOutput, err := s3c.s3Client.ListObjects(context.TODO(), &s3.ListObjectsInput{ + Bucket: aws.String(bucketName), + Prefix: aws.String(folderName), + }) + if err != nil { + metrics.S3OperationFailureCounter.WithLabelValues(bucketName, "ListObjects").Inc() + return nil, err + } + metrics.S3OperationSuccessCounter.WithLabelValues(bucketName, "ListObjects").Inc() + + var downloadedFiles []string + for _, content := range listObjectsOutput.Contents { + err := s3c.DownloadFile(bucketName, targetFolder, *content.Key, filepath.Base(*content.Key)) + if err != nil { + log.Error("Error while downloading file", zap.String("bucketName", bucketName), zap.String("inputFileName", *content.Key), zap.Error(err)) + return nil, err + } + downloadedFiles = append(downloadedFiles, filepath.Base(*content.Key)) + } + return downloadedFiles, nil +} diff --git a/alfred/repository/appFragmentsRepository.go b/alfred/repository/appFragmentsRepository.go new file mode 100644 index 0000000..d73d25a --- /dev/null +++ b/alfred/repository/appFragmentsRepository.go @@ -0,0 +1,63 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/ingester" + "alfred/pkg/es7" + "alfred/pkg/log" + "alfred/utils" + "context" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type AppFragmentsRepository interface { + CreateFragment(fragmentData ingester.FragmentModel, message string, fragmentIngestionIndex string) error + FetchUniqueFragments(key string, 
fragmentIngestionIndex string) (*esapi.Response, error) +} + +type AppFragmentsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewAppFragmentsRepository(esClient es7.ElasticSearchClient) AppFragmentsRepository { + return &AppFragmentsRepositoryImpl{ + esClient: esClient, + } +} + +func (r *AppFragmentsRepositoryImpl) CreateFragment(fragmentData ingester.FragmentModel, message string, fragmentIngestionIndex string) error { + req := esapi.IndexRequest{ + Index: fragmentIngestionIndex, + DocumentID: utils.FRAGMENT_NAME + utils.HYPHEN + fragmentData.FragmentAttributes.FragmentName + utils.UNDERSCORE + utils.SCREEN_NAME + utils.HYPHEN + fragmentData.FragmentAttributes.ScreenName + utils.UNDERSCORE + utils.VERTICAL + utils.HYPHEN + fragmentData.FragmentAttributes.Vertical, + Body: strings.NewReader(message), + } + + indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient()) + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(fragmentIngestionIndex).Inc() + log.Error("fragment insertion document in ES failed", zap.String("response", fmt.Sprintf("%v", indexResponse)), zap.Error(err)) + return err + } + defer indexResponse.Body.Close() + metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(fragmentIngestionIndex).Inc() + + return nil +} + +func (r *AppFragmentsRepositoryImpl) FetchUniqueFragments(key string, fragmentIngestionIndex string) (*esapi.Response, error) { + content := getUniqueValues(key) + result, err := r.esClient.FetchESData(fragmentIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func getUniqueValues(key string) string { + termsAggregationQuery := createTermsAggregationQuery(key, utils.EsUpperLimit) + aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregationQuery)) + return createEsQuery(aggregationQuery, createSizeQuery(0), createTrackTotalHitsQuery(false)) +} diff --git 
a/alfred/repository/cruiseControlRepository.go b/alfred/repository/cruiseControlRepository.go new file mode 100644 index 0000000..600c427 --- /dev/null +++ b/alfred/repository/cruiseControlRepository.go @@ -0,0 +1,113 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/core/cruise" + "alfred/pkg/es7" + "alfred/pkg/log" + "context" + "encoding/json" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type CruiseControlRepository interface { + CreateCruiseControlConfig(cruiseControl *cruise.ControlConfig, cruiseControlIndex string) error + FetchPreviousAppVersion(cruiseControlIndex, appOs string) (*esapi.Response, error) + FetchCruiseControlConfig(appVersion, appOs, cruiseControlIndex string) (*esapi.Response, error) + FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs string) (*esapi.Response, error) +} + +type CruiseControlRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewCruiseControlRepository(esClient es7.ElasticSearchClient) CruiseControlRepository { + return &CruiseControlRepositoryImpl{ + esClient: esClient, + } +} + +func (r *CruiseControlRepositoryImpl) CreateCruiseControlConfig(cruiseControl *cruise.ControlConfig, cruiseControlIndex string) error { + + bdy, err := json.Marshal(cruiseControl) + if err != nil { + return fmt.Errorf("cruise control request conversion to byte failed: %w", err) + } + + req := esapi.IndexRequest{ + Index: cruiseControlIndex, + DocumentID: cruiseControl.OsConfig.AppVersion, + Body: strings.NewReader(string(bdy)), + Refresh: "true", + } + + indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient()) + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(cruiseControlIndex).Inc() + log.Error("cruise-control insert document in ES failed", zap.String("response", fmt.Sprintf("%v", indexResponse)), zap.Error(err)) + return err + } + defer indexResponse.Body.Close() + 
metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(cruiseControlIndex).Inc() + + return nil +} + +func (r *CruiseControlRepositoryImpl) FetchPreviousAppVersion(cruiseControlIndex, appOs string) (*esapi.Response, error) { + content := getPreviousAppVersionQuery(appOs) + result, err := r.esClient.FetchESData(cruiseControlIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *CruiseControlRepositoryImpl) FetchCruiseControlConfig(appVersion, appOs, cruiseControlIndex string) (*esapi.Response, error) { + content := getCruiseControlQuery(appVersion, appOs) + result, err := r.esClient.FetchESData(cruiseControlIndex, content) + if err != nil { + return nil, err + } + return result, nil +} +func (r *CruiseControlRepositoryImpl) FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs string) (*esapi.Response, error) { + content := getAllCruiseControlAppVersionQuery(appOs) + result, err := r.esClient.FetchESData(cruiseControlIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func getPreviousAppVersionQuery(appOs string) string { + query := createEsQuery( + createSearchQuery(createMatchQuery("type", appOs)), + createFieldsQuery("os_config.app_version"), + createSourceQuery(false), + createSortQuery("config_time", "desc", ""), + createSizeQuery(1), + ) + return query +} + +func getCruiseControlQuery(appVersion, appOs string) string { + var termQueries []string + termQueries = append(termQueries, createTermSubQuery("os_config.app_version.keyword", appVersion)) + termQueries = append(termQueries, createTermSubQuery("type", appOs)) + mustQuery := createMustQuery(termQueries...) 
+ return createEsQuery(createSearchQuery(createBoolQuery(mustQuery)), createSizeQuery(100)) +} + +func getAllCruiseControlAppVersionQuery(appOs string) string { + query := createEsQuery( + createSearchQuery(createMatchQuery("type", appOs)), + createFieldsQuery("os_config.app_version"), + createSourceQuery(false), + createSortQuery("os_config.app_version.keyword", "desc", ""), + createSizeQuery(10000), + ) + return query +} diff --git a/alfred/repository/deviceMetricsRepository.go b/alfred/repository/deviceMetricsRepository.go new file mode 100644 index 0000000..48581b8 --- /dev/null +++ b/alfred/repository/deviceMetricsRepository.go @@ -0,0 +1,76 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/core" + "alfred/pkg/es7" + "alfred/pkg/log" + "alfred/utils" + "context" + "encoding/json" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type DeviceMetricsRepository interface { + InsertDeviceMetrics(deviceMetrics *core.DeviceMetricsModel, index string) error + GetDeviceMetrics(startTime int64, endTime int64, snapshotPerSecond int64, client string, index []string) (*esapi.Response, error) +} + +type DeviceMetricsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewDeviceMetricsRepository(esClient es7.ElasticSearchClient) DeviceMetricsRepository { + return &DeviceMetricsRepositoryImpl{ + esClient: esClient, + } +} + +func (dm *DeviceMetricsRepositoryImpl) InsertDeviceMetrics(deviceMetrics *core.DeviceMetricsModel, deviceIndex string) error { + + year, month, day := utils.GetCurrentDate() + index := deviceIndex + fmt.Sprintf("-%d-%d-%d", year, month, day) + + body, err := json.Marshal(deviceMetrics) + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(deviceIndex).Inc() + log.Error("Pushing to Device Metrics Index Failed", zap.Any("Body:", body), zap.Error(err)) + return err + } + + req := esapi.IndexRequest{ + Index: index, + Body: 
strings.NewReader(string(body)), + } + + indexResponse, err := req.Do(context.Background(), dm.esClient.GetESClient()) + if err != nil { + log.Error("Insert For Device Metrics Failed", zap.Error(err)) + metrics.DeviceMetricsUpdateFailureCounter.WithLabelValues(deviceIndex).Inc() + return err + } + defer indexResponse.Body.Close() + metrics.DeviceMetricsUpdateSuccessCounter.WithLabelValues(deviceIndex).Inc() + + return nil +} + +func (dm *DeviceMetricsRepositoryImpl) GetDeviceMetrics(startTime int64, endTime int64, snapshotPerSecond int64, client string, index []string) (*esapi.Response, error) { + content := getDeviceMetricsQuery(startTime, endTime, snapshotPerSecond, client) + response, err := dm.esClient.FetchESDataMultipleIndex(index, content) + if err != nil { + return nil, err + } + return response, nil +} + +func getDeviceMetricsQuery(startTime int64, endTime int64, snapshotPerSecond int64, client string) string { + mustQuery := createMustQuery(createTermSubQueryForInt("device_metrics.snapshot_per_second", snapshotPerSecond), createTermSubQuery("device_metrics.client", client)) + rangeQuery := createRangeQuery("device_metrics.created_at", startTime, endTime) + boolQuery := createBoolQuery(createMustQuery(createBoolQuery(mustQuery), rangeQuery)) + searchQuery := createSearchQuery(boolQuery) + return createEsQuery(searchQuery) +} diff --git a/alfred/repository/errorEventsRepository.go b/alfred/repository/errorEventsRepository.go new file mode 100644 index 0000000..98f1d53 --- /dev/null +++ b/alfred/repository/errorEventsRepository.go @@ -0,0 +1,178 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/pkg/es7" + "alfred/pkg/log" + "alfred/utils" + "context" + "errors" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type ErrorEventsRepository interface { + UploadErrorEvents(message string, errorsIngestionIndex string) (int, error) + 
FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList []string, startTime, endTime int64, allowedErrorEventsFilter []string) (*esapi.Response, error) + FetchErrorEvents(sessionId string, errorEventsUploadIndex string) (*esapi.Response, error) + GetErrorEventsLastCronTimestamp(errorEventsTimeIndex string) (*esapi.Response, error) + UpdateErrorEventsLastCronTimestamp(timestamp int64, clientName string, errorEventsTimeIndex string) error + FetchSessionErrorEventsWithKeyValue(keyValueMap map[string][]string, errorEventsIndex string) (*esapi.Response, error) + UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList []string) error +} + +type ErrorEventsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewErrorEventsRepository(esClient es7.ElasticSearchClient) ErrorEventsRepository { + return &ErrorEventsRepositoryImpl{ + esClient: esClient, + } +} + +func (r *ErrorEventsRepositoryImpl) UploadErrorEvents(message string, errorsIngestionIndex string) (int, error) { + year, month, day := utils.GetCurrentDate() + index := errorsIngestionIndex + fmt.Sprintf("-%d-%d-%d", year, month, day) + + req := esapi.IndexRequest{ + Index: index, + Body: strings.NewReader(message), + } + + indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient()) + if indexResponse != nil { + defer indexResponse.Body.Close() + } + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(errorsIngestionIndex).Inc() + log.Error("IndexResponse is nil for UploadErrorEvents", zap.Error(err)) + return 500, err + } + + if indexResponse.IsError() { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(errorsIngestionIndex).Inc() + err = errors.New("create error events upload data failed") + } + + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(errorsIngestionIndex).Inc() + log.Error("error-events data insert document upload in ES failed", zap.String("response", 
fmt.Sprintf("%v", indexResponse)), zap.Error(err))
		return indexResponse.StatusCode, err
	}
	metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(errorsIngestionIndex).Inc()
	return indexResponse.StatusCode, nil
}

// FetchSessionIdFromSessionErrorEventsWithTimeRange queries every index in the
// list for active error events in [startTime, endTime] whose error_name is in
// the filter, aggregating session ids into the "buckets" terms aggregation
// (hits are suppressed with size 0).
func (r *ErrorEventsRepositoryImpl) FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList []string, startTime, endTime int64, sessionErrorEventsFilter []string) (*esapi.Response, error) {
	content := getSessionIdFromSessionErrorEventsWithTimeRangeQuery(startTime, endTime, sessionErrorEventsFilter)
	result, err := r.esClient.FetchESDataMultipleIndex(errorEventsUploadIndexList, content)
	if err != nil {
		return nil, err
	}
	return result, nil
}

// FetchErrorEvents returns the raw ES response for all error events of one session.
func (r *ErrorEventsRepositoryImpl) FetchErrorEvents(sessionId string, errorEventsUploadIndex string) (*esapi.Response, error) {
	content := getErrorEventsQuery(sessionId)
	result, err := r.esClient.FetchESData(errorEventsUploadIndex, content)
	if err != nil {
		return nil, err
	}
	return result, nil
}

// GetErrorEventsLastCronTimestamp fetches the bookkeeping document that stores
// per-client last-run timestamps (match_all on the time index).
func (r *ErrorEventsRepositoryImpl) GetErrorEventsLastCronTimestamp(errorEventsTimeIndex string) (*esapi.Response, error) {
	content := getErrorEventsLastCronTimestampQuery()
	result, err := r.esClient.FetchESData(errorEventsTimeIndex, content)
	if err != nil {
		return nil, err
	}
	return result, nil
}

// UpdateErrorEventsLastCronTimestamp stores the given timestamp under the
// client's field via a painless update-by-query on the time index.
// FIX: the error from UpdateESData is now checked before the response is
// dereferenced; previously `defer response.Body.Close()` and
// `response.IsError()` ran first and panicked when the call failed with a nil
// response.
func (r *ErrorEventsRepositoryImpl) UpdateErrorEventsLastCronTimestamp(timestamp int64, clientName string, errorEventsTimeIndex string) error {
	content := getUpdateErrorEventsLastCronTimestampQuery(timestamp, clientName)
	response, err := r.esClient.UpdateESData([]string{errorEventsTimeIndex}, content)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.IsError() {
		return errors.New("update error events last cron timestamp failed")
	}
	return nil
}

// FetchSessionErrorEventsWithKeyValue returns active error events matching
// every key -> allowed-values entry of keyValueMap (one terms clause per key).
func (r *ErrorEventsRepositoryImpl) FetchSessionErrorEventsWithKeyValue(keyValueMap map[string][]string, errorEventsUploadIndex string) (*esapi.Response, error) {
	content := getSessionErrorEventsFromSessionIdQuery(keyValueMap)
	result, err := r.esClient.FetchESData(errorEventsUploadIndex, content)
	if err != nil {
		return nil, err
	}
	return result, nil
}

// UpdateErrorEventsInActiveBulk marks the listed documents inactive via one
// bulk request (doc ids and index names are parallel slices).
// FIX: the error from UpdateESDataBulk is now checked before the result is
// dereferenced; previously `result.IsError()` ran first and panicked on a nil
// result, and Body.Close was deferred only after the early returns, leaking
// the body on the error path.
func (r *ErrorEventsRepositoryImpl) UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList []string) error {
	content := getUpdateErrorEventsInActiveBulkQuery(errorEventsdocIdList, errorEventsindexList)
	result, err := r.esClient.UpdateESDataBulk(content, errorEventsindexList)
	if err != nil {
		return err
	}
	defer result.Body.Close()
	if result.IsError() {
		return errors.New("update error events in active bulk failed")
	}
	return nil
}

// getSessionIdFromSessionErrorEventsWithTimeRangeQuery builds the query used
// by FetchSessionIdFromSessionErrorEventsWithTimeRange above.
func getSessionIdFromSessionErrorEventsWithTimeRangeQuery(startTime int64, endTime int64, sessionErrorEventsFilter []string) string {
	var queryList []string
	queryList = append(queryList, createRangeQuery("error_event.error_timestamp", startTime, endTime))
	queryList = append(queryList, createTermsSubQuery("error_event.error_name", sessionErrorEventsFilter))
	mustNotQuery := createMustNotQuery(createTermSubQueryBool("error_event.is_active", false))
	boolQuery := createBoolQuery(createMustQuery(queryList...), mustNotQuery)
	searchQuery := createSearchQuery(boolQuery)
	aggregationQuery := buildAggregationQuery("buckets", createTermsAggregationQuery("error_attributes.session_id", utils.SessionUpperLimit))
	return createEsQuery(createSizeQuery(0), searchQuery, createAggregationQuery(aggregationQuery))
}

// getErrorEventsQuery matches all error events of a single session.
func getErrorEventsQuery(sessionId string) string {
	mustQuery := createMustQuery(createTermSubQuery("error_attributes.session_id", sessionId))
	boolQuery := createBoolQuery(mustQuery)
	return createEsQuery(createSearchQuery(boolQuery))
}

// getErrorEventsLastCronTimestampQuery is a plain match_all search.
func getErrorEventsLastCronTimestampQuery() string {
	return createEsQuery(createSearchQuery(createMatchAllQuery()))
}

// getUpdateErrorEventsLastCronTimestampQuery writes the timestamp (as a long
// literal, hence the L suffix) into the client-named field via painless.
func getUpdateErrorEventsLastCronTimestampQuery(timestamp int64, clientName string) string {
	source := fmt.Sprintf(`ctx._source.%s = 
%dL`, clientName, timestamp) + scriptQuery := createScriptQuery(source) + searchQuery := createSearchQuery(createMatchAllQuery()) + return createEsQuery(searchQuery, scriptQuery) +} + +func getSessionErrorEventsFromSessionIdQuery(keyValueMap map[string][]string) string { + var queryList []string + for key, value := range keyValueMap { + queryList = append(queryList, createTermsSubQuery(key, value)) + } + mustQuery := createMustQuery(queryList...) + mustNotQuery := createMustNotQuery(createTermSubQueryBool("error_event.is_active", false)) + searchQuery := createSearchQuery(createBoolQuery(mustQuery, mustNotQuery)) + return createEsQuery(searchQuery, createSizeQuery(utils.EsUpperLimit)) +} + +func getUpdateErrorEventsInActiveBulkQuery(errorEventsdocIdList, errorEventsindexList []string) string { + var updateQuery []string + for sliceIndex, docId := range errorEventsdocIdList { + bulkUpdateQueryBool := createBulkUpdateQuery(errorEventsindexList[sliceIndex], docId) + scriptQuery := createEsQuery(createScriptQuery("ctx._source.error_event.is_active = " + fmt.Sprintf("%t", false))) + updateQuery = append(updateQuery, strings.Join([]string{bulkUpdateQueryBool, scriptQuery}, utils.NEWLINE)) + } + return strings.Join(updateQuery, utils.NEWLINE) + utils.NEWLINE +} diff --git a/alfred/repository/es_query_util.go b/alfred/repository/es_query_util.go new file mode 100644 index 0000000..4cf0632 --- /dev/null +++ b/alfred/repository/es_query_util.go @@ -0,0 +1,214 @@ +package repository + +import ( + "alfred/utils" + "fmt" + "strings" +) + +const ( + TermQuery = `{ "term": { "%s": "%s" } }` + TermQueryForInt = `{ "term": { "%s": %d } }` + TermQueryForBool = `{ "term": { "%s": %t } }` + RangeQuery = `{ "range": { "%s": { "gte": %d, "lte": %d } } }` + RangeQueryGteString = `{ "range": { "%s": { "gte": "%s" } } }` + MustQuery = `"must": [ %s ] ` + MustNotQuery = `"must_not": [ %s ]` + ShouldQuery = `"should": [ %s ] ` + BoolQuery = `{ "bool":{ %s } }` + SortQuery = `"sort": [ { 
"%s": { "order": "%s" } } ]`
	CollapseQuery = `"collapse": { "field": "%s" }`
	MatchAllQuery = `{ "match_all": {} }`
	FromQuery = `"from": %d`
	SizeQuery = `"size": %d`
	SearchQuery = `"query": %s`
	FieldsQuery = `"fields": [ "%s" ]`
	EsQuery = "{ %s }"
	sourceQuery = `"_source": %t`
	AggregationQuery = `"aggs": { %s }`
	AggregationQueryFormat = `"%s": { %s }` // aggregation name, aggregation query
	TermsAggregationQuery = `"terms": { "field": "%s", "size": %d }`
	MinAggregationQuery = `"min": { "field": "%s" }`
	MaxAggregationQuery = `"max": { "field": "%s" }`
	CardinalityAggregationQuery = `"cardinality": { "field": "%s" }`
	FilterAggregationQuery = `"filter": %s`
	TrackTotalHitsQuery = `"track_total_hits": %t`
	ScriptQuery = `"script": { "source": "%s" , "lang": "painless" }`
	TermsAggregationQueryWithOrder = `"terms": { "field": "%s", "size": %d, "order" : { "%s" : "%s" } }`
	TermsAggregationQueryWithoutSize = `"terms": { "field": "%s" }`
	CustomQuery = `"%s": %s`
	BulkUpdateQuery = `{ "update": {"_index": "%s", "_id": "%s" } }`
	TermsSubQuery = `{ "terms": { "%s": [ %s ] } }`
	MultiSortQuery = `"sort":[ %s ]`
	SortQueryFields = `{ "%s": { "order": "%s" } }`
	MatchQuery = `{ "match": { "%s": "%s" } }`
)

// createCustomQuery renders `"key": query` for an arbitrary top-level clause.
func createCustomQuery(key string, query string) string {
	return fmt.Sprintf(CustomQuery, key, query)
}

// createTermSubQuery renders one term clause per id, comma-joined.
func createTermSubQuery(key string, ids ...string) string {
	var termQueries []string
	for _, id := range ids {
		termQueries = append(termQueries, fmt.Sprintf(TermQuery, key, id))
	}
	return strings.Join(termQueries, ",")
}

// createTermSubQueryForInt is createTermSubQuery for int64 values.
func createTermSubQueryForInt(key string, ids ...int64) string {
	var termQueries []string
	for _, id := range ids {
		termQueries = append(termQueries, fmt.Sprintf(TermQueryForInt, key, id))
	}
	return strings.Join(termQueries, ",")
}

// createTermSubQueryBool is createTermSubQuery for bool values.
func createTermSubQueryBool(key string, ids ...bool) string {
	var termQueries []string
	for _, id := range ids {
		termQueries = append(termQueries, fmt.Sprintf(TermQueryForBool, key, id))
	}
	return strings.Join(termQueries, ",")
}

// createRangeQuery renders an inclusive gte/lte range clause.
func createRangeQuery(key string, greaterThan int64, lessThan int64) string {
	return fmt.Sprintf(RangeQuery, key, greaterThan, lessThan)
}

// createRangeQueryForGteString renders a gte-only range clause with a string bound.
func createRangeQueryForGteString(key string, greaterThan string) string {
	return fmt.Sprintf(RangeQueryGteString, key, greaterThan)
}

func createMustQuery(filters ...string) string {
	return fmt.Sprintf(MustQuery, strings.Join(filters, ","))
}

func createMatchAllQuery() string {
	return MatchAllQuery
}

func createMatchQuery(key string, value string) string {
	return fmt.Sprintf(MatchQuery, key, value)
}

func createShouldQuery(filters ...string) string {
	return fmt.Sprintf(ShouldQuery, strings.Join(filters, ","))
}

func createBoolQuery(filters ...string) string {
	return fmt.Sprintf(BoolQuery, strings.Join(filters, ","))
}

func createSearchQuery(filters ...string) string {
	return fmt.Sprintf(SearchQuery, strings.Join(filters, ","))
}

// createFieldsQuery renders `"fields": [ "a","b" ]`.
// FIX: fields were previously joined with a bare comma inside the single
// quoted %s placeholder, so multiple fields collapsed into one string
// (`[ "a,b" ]`). Joining with `","` emits one quoted entry per field; the
// single-field case (the only usage in this file) is unchanged.
func createFieldsQuery(fields ...string) string {
	return fmt.Sprintf(FieldsQuery, strings.Join(fields, `","`))
}

// createSortQuery renders a single-field sort clause. A non-empty format is
// injected by extending the order value with `", "format": "<format>` before
// substitution into the SortQuery template.
func createSortQuery(key string, order string, format string) string {
	if format != utils.EMPTY {
		order += fmt.Sprintf(`", "format": "%s`, format)
	}
	return fmt.Sprintf(SortQuery, key, order)
}

func createCollapseQuery(key string) string {
	return fmt.Sprintf(CollapseQuery, key)
}

func createCardinalityAggregationQuery(field string) string {
	return fmt.Sprintf(CardinalityAggregationQuery, field)
}

func createFromQuery(from int64) string {
	return fmt.Sprintf(FromQuery, from)
}

func createSizeQuery(size int64) string {
	return fmt.Sprintf(SizeQuery, size)
}

// createEsQuery wraps the comma-joined clauses into the outermost JSON object.
func createEsQuery(query ...string) string {
	return fmt.Sprintf(EsQuery, strings.Join(query, ","))
}

func createSourceQuery(source bool) string {
	return fmt.Sprintf(sourceQuery, source)
}

func 
createFilterAggregationQuery(value ...string) string { + return fmt.Sprintf(FilterAggregationQuery, strings.Join(value, ",")) +} + +func createAggregationQuery(aggregations ...string) string { + return fmt.Sprintf(AggregationQuery, strings.Join(aggregations, ",")) +} + +func createMinAggregationQuery(field string) string { + return fmt.Sprintf(MinAggregationQuery, field) +} + +func createMaxAggregationQuery(field string) string { + return fmt.Sprintf(MaxAggregationQuery, field) +} + +func createTermsAggregationQuery(field string, size int) string { + return fmt.Sprintf(TermsAggregationQuery, field, size) +} + +func createTermsAggregationQueryWithoutSize(field string) string { + return fmt.Sprintf(TermsAggregationQueryWithoutSize, field) +} + +func buildAggregationQuery(aggregationName string, aggregationQueries ...string) string { + return fmt.Sprintf(AggregationQueryFormat, aggregationName, strings.Join(aggregationQueries, ",")) +} + +func createTrackTotalHitsQuery(trackTotalHits bool) string { + return fmt.Sprintf(TrackTotalHitsQuery, trackTotalHits) +} + +func createBoolShouldQuery(queries ...string) string { + return fmt.Sprintf(createBoolQuery(createShouldQuery(strings.Join(queries, ",")))) +} + +func createScriptQuery(script string) string { + return fmt.Sprintf(ScriptQuery, script) +} + +func createMustNotQuery(filters ...string) string { + return fmt.Sprintf(MustNotQuery, strings.Join(filters, ",")) +} + +func createTermsAggregationQueryWithOrder(field string, size int, filter, order string) string { + return fmt.Sprintf(TermsAggregationQueryWithOrder, field, size, filter, order) +} + +func createBulkUpdateQuery(index, docId string) string { + return fmt.Sprintf(BulkUpdateQuery, index, docId) +} + +func createTermsSubQuery(key string, ids []string) string { + var idsList []string + for _, id := range ids { + idsList = append(idsList, fmt.Sprintf(`"%s"`, id)) + } + return fmt.Sprintf(TermsSubQuery, key, strings.Join(idsList, ",")) +} + +func 
createSortQueryFields(key string, order string, format string) string { + if format != utils.EMPTY { + order += fmt.Sprintf(`", "format": "%s`, format) + } + return fmt.Sprintf(SortQueryFields, key, order) +} + +func createMultiSortQuery(sortQueries ...string) string { + return fmt.Sprintf(MultiSortQuery, strings.Join(sortQueries, ",")) +} diff --git a/alfred/repository/eventsRepository.go b/alfred/repository/eventsRepository.go new file mode 100644 index 0000000..6eceeca --- /dev/null +++ b/alfred/repository/eventsRepository.go @@ -0,0 +1,235 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/es" + "alfred/pkg/es7" + "alfred/pkg/log" + "alfred/utils" + "context" + "errors" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type EventsRepository interface { + CreateEventIngester(message string, eventIngestionIndex string) (int, error) + FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds []string, snapshotPerSecond []int64, startTime, endTime int64, page *es.Page, eventIngestionIndexList []string, phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string, appOs string) (*esapi.Response, error) + FetchEventsFromSessionId(sessionId string, eventIngestionIndex string, withAggregation bool) (*esapi.Response, error) + FetchUniqueKeys(key string, eventIngestionIndex string) (*esapi.Response, error) + FetchAllEventsFromSession(sessionId string, eventIngestionIndex string) (*esapi.Response, error) + FetchEventsFromSession(sessionId string, eventIngestionIndex, indexName string) (*esapi.Response, error) + FetchZipsFromSession(sessionId string, eventIngestionIndex string, indexName string) (*esapi.Response, error) + FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion string) (*esapi.Response, error) +} + +type EventsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewEventsRepository(esClient 
es7.ElasticSearchClient) EventsRepository { + return &EventsRepositoryImpl{ + esClient: esClient, + } +} + +func (r *EventsRepositoryImpl) CreateEventIngester(message string, eventIngestionIndex string) (int, error) { + year, month, day := utils.GetCurrentDate() + index := eventIngestionIndex + fmt.Sprintf("-%d-%d-%d", year, month, day) + req := esapi.IndexRequest{ + Index: index, + Body: strings.NewReader(message), + } + + indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient()) + if indexResponse != nil { + defer indexResponse.Body.Close() + } + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(eventIngestionIndex).Inc() + log.Error("IndexResponse is nil for CreateEventIngester", zap.Error(err)) + return 500, err + } + + if indexResponse.IsError() { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(eventIngestionIndex).Inc() + err = errors.New("create event upload data failed") + } + + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(eventIngestionIndex).Inc() + log.Error("event insertion document in ES failed", zap.String("response", fmt.Sprintf("%v", indexResponse)), zap.Error(err)) + return indexResponse.StatusCode, err + } + metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(eventIngestionIndex).Inc() + + return indexResponse.StatusCode, nil +} + +func (r *EventsRepositoryImpl) FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds []string, snapshotPerSecond []int64, startTime, endTime int64, page *es.Page, eventIngestionIndexList []string, phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string, appOs string) (*esapi.Response, error) { + var content string + + if endTime == 0 { + content = getPaginatedEventsFromLabelsAndDeviceIdsQueryWithoutTimeRange(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds, snapshotPerSecond, customerId, phoneNumber, 
codePushVersion, agentEmailId, page, appOs) + } else { + content = getPaginatedEventsFromLabelsAndDeviceIdsQuery(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds, snapshotPerSecond, customerId, phoneNumber, codePushVersion, agentEmailId, startTime, endTime, page, appOs) + } + + result, err := r.esClient.FetchESDataMultipleIndex(eventIngestionIndexList, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchEventsFromSessionId(sessionId string, eventIngestionIndex string, withAggregation bool) (*esapi.Response, error) { + content := getEventsFromSessionIdQuery(sessionId) + if !withAggregation { + content = getEventsFromSessionIdQueryWithoutAggregation(sessionId) + } + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchUniqueKeys(key string, eventIngestionIndex string) (*esapi.Response, error) { + content := getUniqueValuesQueryFromEvents(key) + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion string) (*esapi.Response, error) { + content := getFetchUniqueKeysWithFilterQuery(key, minAppVersion) + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchAllEventsFromSession(sessionId string, eventIngestionIndex string) (*esapi.Response, error) { + content := getAllEventsForSessionQuery(sessionId, utils.EMPTY) + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchEventsFromSession(sessionId string, eventIngestionIndex, indexName string) (*esapi.Response, error) 
{ + content := getAllEventsForSessionQuery(sessionId, indexName) + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func (r *EventsRepositoryImpl) FetchZipsFromSession(sessionId string, eventIngestionIndex string, indexName string) (*esapi.Response, error) { + content := getZipsFromSessionQuery(sessionId, indexName) + result, err := r.esClient.FetchESData(eventIngestionIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func getPaginatedEventsFromLabelsAndDeviceIdsQuery(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds []string, snapshotPerSecond []int64, customerId, phoneNumber, codePushVersion, agentEmailId []string, startTimestamp, endTimestamp int64, page *es.Page, appOs string) string { + multipleShouldQuery := []string{createBoolShouldQuery(createTermSubQuery("events.event_name", labels...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.device_id", deviceIds...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_name", appName...)), + createBoolShouldQuery(createTermSubQuery("events.screen_name", screenName...)), + createBoolShouldQuery(createTermSubQuery("events.module_name", vertical...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_code", appVersion...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.phone_number.keyword", phoneNumber...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.code_push_version.keyword", codePushVersion...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.agent_email_id.keyword", agentEmailId...))} + appOsMatchQuery := createMatchQuery("base_attributes.app_os", appOs) + shouldQuery := strings.Join(multipleShouldQuery, ",") + rangeQuery := createRangeQuery("events.event_timestamp", startTimestamp, endTimestamp) + query := createEsQuery( + 
createSearchQuery(createBoolQuery(createMustQuery(shouldQuery, rangeQuery, appOsMatchQuery))), + createSortQuery("events.event_timestamp", string(page.SortDirection), utils.EMPTY), + createCollapseQuery("base_attributes.session_id"), + createFromQuery(page.PageSize*page.PageNumber), + createSizeQuery(page.PageSize), + ) + return query +} + +func getPaginatedEventsFromLabelsAndDeviceIdsQueryWithoutTimeRange(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds []string, snapshotPerSecond []int64, customerId, phoneNumber, codePushVersion, agentEmailId []string, page *es.Page, appOs string) string { + multipleShouldQuery := []string{createBoolShouldQuery(createTermSubQuery("events.event_name", labels...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.device_id", deviceIds...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_name", appName...)), + createBoolShouldQuery(createTermSubQuery("events.screen_name", screenName...)), + createBoolShouldQuery(createTermSubQuery("events.module_name", vertical...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_code", appVersion...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.phone_number.keyword", phoneNumber...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.code_push_version.keyword", codePushVersion...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.agent_email_id.keyword", agentEmailId...))} + appOsMatchQuery := createMatchQuery("base_attributes.app_os", appOs) + shouldQuery := strings.Join(multipleShouldQuery, ",") + query := createEsQuery( + createSearchQuery(createBoolQuery(createMustQuery(shouldQuery, appOsMatchQuery))), + createSortQuery("events.event_timestamp", string(page.SortDirection), utils.EMPTY), + createCollapseQuery("base_attributes.session_id"), + createFromQuery(page.PageSize*page.PageNumber), + createSizeQuery(page.PageSize), + ) + return query +} + 
+func getEventsFromSessionIdQuery(sessionID string) string { + termQuery := createTermSubQuery("base_attributes.session_id", sessionID) + boolQuery := createBoolQuery(createShouldQuery(termQuery)) + searchQuery := createSearchQuery(boolQuery) + sortQuery := createSortQuery("events.event_timestamp", "asc", utils.EMPTY) + filterQuery := createFilterAggregationQuery(createTermSubQuery("events.event_type", "TOUCH_EVENT")) + aggregationQuery := createAggregationQuery(buildAggregationQuery("filter_data", filterQuery)) + return createEsQuery(searchQuery, sortQuery, aggregationQuery, createSizeQuery(utils.EventsUpperLimit)) +} + +func getEventsFromSessionIdQueryWithoutAggregation(sessionID string) string { + termQuery := createTermSubQuery("base_attributes.session_id", sessionID) + boolQuery := createBoolQuery(createShouldQuery(termQuery)) + searchQuery := createSearchQuery(boolQuery) + sortQuery := createSortQuery("events.event_timestamp", "asc", utils.EMPTY) + return createEsQuery(searchQuery, sortQuery, createSizeQuery(utils.EventsUpperLimit)) +} + +func getUniqueValuesQueryFromEvents(fieldName string) string { + termsAggregation := createTermsAggregationQuery(fieldName, utils.EventsUpperLimit) + aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation)) + return createEsQuery(aggregationQuery, createSizeQuery(0), `"track_total_hits": false`) + +} + +func getAllEventsForSessionQuery(sessionID, indexName string) string { + termQuery := createTermSubQuery("events.session_id", sessionID) + boolQuery := createBoolQuery(createShouldQuery(termQuery)) + sortQuery := createSortQuery("events.event_timestamp", "asc", utils.EMPTY) + return createEsQuery(createSearchQuery(boolQuery), sortQuery, createSizeQuery(utils.EventsUpperLimit)) +} + +func getFetchUniqueKeysWithFilterQuery(key, minAppVersion string) string { + searchQuery := createSearchQuery(createRangeQueryForGteString("base_attributes.app_version_code", minAppVersion)) + termsAggregation 
:= createTermsAggregationQuery(key, utils.EventsUpperLimit) + aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation)) + return createEsQuery(searchQuery, aggregationQuery, createSizeQuery(0), `"track_total_hits": false`) +} + +func getZipsFromSessionQuery(sessionID string, index string) string { + sizeQuery := createSizeQuery(0) + searchQuery := createSearchQuery(createTermSubQuery("events.session_id", sessionID)) + minAggQuery := createAggregationQuery(buildAggregationQuery("min_event_timestamp", createMinAggregationQuery("events.event_timestamp"))) + termQueryWithOrder := buildAggregationQuery("unique_zipNames", createTermsAggregationQueryWithOrder("events.zip_name.keyword", utils.EventsUpperLimit, "min_event_timestamp", "asc"), minAggQuery) + aggQuery := createAggregationQuery(termQueryWithOrder) + return createEsQuery(searchQuery, aggQuery, sizeQuery) +} diff --git a/alfred/repository/repositories.go b/alfred/repository/repositories.go new file mode 100644 index 0000000..25a8c2e --- /dev/null +++ b/alfred/repository/repositories.go @@ -0,0 +1,69 @@ +package repository + +import ( + "alfred/config" + "alfred/pkg/es7" +) + +type Repository struct { + SessionsRepository SessionsRepository + WebSessionsRepository WebSessionsRepository + EventsRepository EventsRepository + CruiseControlRepository CruiseControlRepository + AppFragmentRepository AppFragmentsRepository + ErrorEventsRepository ErrorEventsRepository + ShedlockRepository ShedlockRepository + VideoGenerationStatusRepository VideoGenerationStatusRepository + DeviceMetricsRepository DeviceMetricsRepository +} + +func initSessionsRepository(esClient es7.ElasticSearchClient) SessionsRepository { + return NewSessionsRepository(esClient) +} + +func initWebSessionsRepository(esClient es7.ElasticSearchClient) WebSessionsRepository { + return NewWebSessionsRepository(esClient) +} + +func initEventsRepository(esClient es7.ElasticSearchClient) EventsRepository { + return 
NewEventsRepository(esClient) +} + +func initCruiseControlRepository(esClient es7.ElasticSearchClient) CruiseControlRepository { + return NewCruiseControlRepository(esClient) +} + +func initAppFragmentsRepository(esClient es7.ElasticSearchClient) AppFragmentsRepository { + return NewAppFragmentsRepository(esClient) +} + +func initErrorEventsRepository(esClient es7.ElasticSearchClient) ErrorEventsRepository { + return NewErrorEventsRepository(esClient) +} + +func InitShedlockRepository(esClient es7.ElasticSearchClient) ShedlockRepository { + return NewShedlockRepository(esClient) +} + +func initVideoFragmentStatusRepository(esClient es7.ElasticSearchClient) VideoGenerationStatusRepository { + return NewVideoGenerationStatusRepository(esClient) +} + +func initDeviceMetricsRepository(esClient es7.ElasticSearchClient) DeviceMetricsRepository { + return NewDeviceMetricsRepository(esClient) +} + +func InitRepositories(elasticsearchBaseConfig config.ElasticSearchBaseConfig) *Repository { + esClient := es7.NewEsConfig(elasticsearchBaseConfig) + return &Repository{ + SessionsRepository: initSessionsRepository(esClient), + WebSessionsRepository: initWebSessionsRepository(esClient), + EventsRepository: initEventsRepository(esClient), + CruiseControlRepository: initCruiseControlRepository(esClient), + AppFragmentRepository: initAppFragmentsRepository(esClient), + ErrorEventsRepository: initErrorEventsRepository(esClient), + ShedlockRepository: InitShedlockRepository(esClient), + VideoGenerationStatusRepository: initVideoFragmentStatusRepository(esClient), + DeviceMetricsRepository: initDeviceMetricsRepository(esClient), + } +} diff --git a/alfred/repository/sessionRepository.go b/alfred/repository/sessionRepository.go new file mode 100644 index 0000000..40b073c --- /dev/null +++ b/alfred/repository/sessionRepository.go @@ -0,0 +1,276 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/es7" + 
"alfred/pkg/log" + "alfred/utils" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type SessionsRepository interface { + UploadSession(alfredSessionRecordingEvent ingester.SessionUploadRequest, sessionUploadIndex string) (int, error) + FetchSessionWithTimeRange(startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndexList []string, sortBy, appOs string) (*esapi.Response, error) + FetchSessionsWithSessionIds(sessionIds []string, sessionUploadIndex string) (*esapi.Response, error) + FetchSessionAndSessionDurationWithSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) (*esapi.Response, error) + FetchDeviceAttributesForMetrics(startTimestamp int64, endTimestamp int64, snapshotPerSecond int64, sessionUploadIndex string, page *es.Page) (*esapi.Response, error) + UpdateSessionErrorEventsWithSessionId(sessionId []string, sessionUploadIndexList []string, hasErrors bool) error + FetchSessionListFromSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) (*esapi.Response, error) + FetchSessionWithLabels(appVersion []string, deviceIds []string, startTimestamp, endTimestamp int64, page *es.Page, sessionUploadSearchIndexList []string, appOs string) (*esapi.Response, error) + FetchUniqueKeys(key string, sessionUploadIndex string) (*esapi.Response, error) +} + +type SessionsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewSessionsRepository(esClient es7.ElasticSearchClient) SessionsRepository { + return &SessionsRepositoryImpl{ + esClient: esClient, + } +} + +func (r *SessionsRepositoryImpl) UploadSession(alfredSessionRecordingEvent ingester.SessionUploadRequest, sessionUploadIndex string) (int, error) { + year, month, day := utils.GetCurrentDate() + index := sessionUploadIndex + fmt.Sprintf("-%d-%d-%d", year, month, day) + message, err := json.Marshal(alfredSessionRecordingEvent) 
+	if err != nil {
+		// Marshalling the event itself failed; count it as an ingestion failure.
+		metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(sessionUploadIndex).Inc()
+		log.Error("error ingesting app session", zap.Error(err))
+		return 0, err
+	}
+
+	req := esapi.IndexRequest{
+		Index:      index,
+		DocumentID: alfredSessionRecordingEvent.SessionUploadEventAttributes.EventId,
+		Body:       bytes.NewReader(message),
+	}
+
+	indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient())
+	if indexResponse != nil {
+		defer indexResponse.Body.Close()
+	}
+	if err != nil {
+		metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(sessionUploadIndex).Inc()
+		log.Error("IndexResponse is nil for UploadSession", zap.Error(err))
+		return 500, err
+	}
+
+	// BUGFIX: the failure counter was previously incremented twice for a single
+	// ES error response — once when IsError() was detected and once more in a
+	// redundant `if err != nil` branch that followed. Count the failure exactly
+	// once and return directly from the error branch.
+	if indexResponse.IsError() {
+		metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(sessionUploadIndex).Inc()
+		err = errors.New("create session upload data failed")
+		log.Error("session upload data insert document in ES failed", zap.String("response", fmt.Sprintf("%v", indexResponse)), zap.Error(err))
+		return indexResponse.StatusCode, err
+	}
+
+	metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(sessionUploadIndex).Inc()
+	return indexResponse.StatusCode, nil
+}
+
+// FetchSessionWithTimeRange returns the raw ES response for sessions whose
+// base_attributes.client_ts falls inside [startTimestamp, endTimestamp],
+// paginated, sorted by sortBy, and filtered on app OS.
+func (r *SessionsRepositoryImpl) FetchSessionWithTimeRange(startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndexList []string, sortBy, appOs string) (*esapi.Response, error) {
+	content := getAllPaginatedSessionsQuery(startTimestamp, endTimestamp, page, sortBy, appOs)
+	result, err := r.esClient.FetchESDataMultipleIndex(sessionUploadIndexList, content)
+	if err != nil {
+		return nil, err
+	}
+	return result, err
+}
+
+// FetchSessionsWithSessionIds returns the raw ES response for the documents
+// matching the given session ids, sorted by client timestamp ascending.
+func (r *SessionsRepositoryImpl) FetchSessionsWithSessionIds(sessionIds []string, sessionUploadIndex string) (*esapi.Response, error) {
+	content := getSessionsFromSessionIdsQuery(sessionIds)
+	result, err := r.esClient.FetchESData(sessionUploadIndex, 
content)
+	if err != nil {
+		return nil, err
+	}
+	return result, err
+}
+
+// FetchSessionAndSessionDurationWithSessionIds fetches the matching sessions
+// together with per-session min/max timestamp aggregations used downstream to
+// derive session duration.
+func (r *SessionsRepositoryImpl) FetchSessionAndSessionDurationWithSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) (*esapi.Response, error) {
+	content := getSessionAndSessionDurationFromSessionIdQuery(sessionIds, page, sortBy)
+	result, err := r.esClient.FetchESData(sessionUploadIndex, content)
+	if err != nil {
+		return nil, err
+	}
+	return result, err
+}
+
+// FetchDeviceAttributesForMetrics fetches per-session aggregations of device
+// attributes (battery, memory, timestamps) for metrics within the time range.
+func (r *SessionsRepositoryImpl) FetchDeviceAttributesForMetrics(startTimestamp int64, endTimestamp int64, snapshotPerSecond int64, sessionUploadIndex string, page *es.Page) (*esapi.Response, error) {
+	content := getAggsOnSessionIdsForDeviceAttributes(startTimestamp, endTimestamp, snapshotPerSecond, page)
+	result, err := r.esClient.FetchESData(sessionUploadIndex, content)
+	if err != nil {
+		log.Error("Fetching Device Metrics Failed:", zap.Error(err))
+		return nil, err
+	}
+	return result, err
+}
+
+// UpdateSessionErrorEventsWithSessionId sets base_attributes.has_errors on all
+// documents of the given sessions via an update-by-query request.
+func (r *SessionsRepositoryImpl) UpdateSessionErrorEventsWithSessionId(sessionId []string, sessionUploadIndexList []string, hasErrors bool) error {
+	content := getUpdateSessionErrorEventsWithSessionIdQuery(sessionId, hasErrors)
+	response, err := r.esClient.UpdateESData(sessionUploadIndexList, content)
+	// BUGFIX: Body.Close was previously deferred BEFORE the error check, which
+	// panics with a nil pointer dereference whenever UpdateESData returns a nil
+	// response alongside an error. Check err first, then defer the close.
+	if err != nil {
+		log.Error("update session error events with session id failed", zap.Error(err), zap.String("query", content))
+		return err
+	}
+	defer response.Body.Close()
+	if response.IsError() {
+		err = errors.New("update has_errors true in session failed")
+		log.Error("update session error events with session id failed", zap.Error(err), zap.String("response", fmt.Sprintf("%v", response)), zap.String("query", content))
+		return err
+	}
+	return nil
+}
+
+// FetchSessionListFromSessionIds returns a paginated, collapsed-by-session
+// list of documents for the given session ids.
+func (r *SessionsRepositoryImpl) FetchSessionListFromSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) (*esapi.Response, error) {
+	content := getSessionListFromSessionIdsQuery(sessionIds, page, 
sortBy) + result, err := r.esClient.FetchESData(sessionUploadIndex, content) + if err != nil { + return nil, err + } + return result, err +} + +func (r *SessionsRepositoryImpl) FetchSessionWithLabels(appVersion []string, deviceIds []string, startTimestamp, endTimestamp int64, page *es.Page, sessionUploadSearchIndexList []string, appOs string) (*esapi.Response, error) { + var content string + if endTimestamp == 0 { + content = getPaginatedSessionsFromLabelsQueryWithoutTimeRange(appVersion, deviceIds, page, appOs) + } else { + content = getPaginatedSessionsFromLabelsQuery(appVersion, deviceIds, startTimestamp, endTimestamp, page, appOs) + } + result, err := r.esClient.FetchESDataMultipleIndex(sessionUploadSearchIndexList, content) + if err != nil { + return nil, err + } + return result, err +} + +func (r *SessionsRepositoryImpl) FetchUniqueKeys(key string, sessionUploadIndex string) (*esapi.Response, error) { + content := getUniqueValuesQueryFromSession(key) + result, err := r.esClient.FetchESData(sessionUploadIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func getAllPaginatedSessionsQuery(startTimestamp int64, endTimestamp int64, page *es.Page, sortBy, appOs string) string { + rangeQuery := createRangeQuery("base_attributes.client_ts", startTimestamp, endTimestamp) + appOsMatchQuery := createMatchQuery("base_attributes.app_os", appOs) + boolQuery := createBoolQuery(createMustQuery(rangeQuery, appOsMatchQuery)) + searchQuery := createSearchQuery(boolQuery) + sortQuery := createSortQuery(sortBy, string(page.SortDirection), "strict_date_optional_time_nanos") + collapseQuery := createCollapseQuery("base_attributes.session_id") + return createEsQuery(searchQuery, sortQuery, collapseQuery, createFromQuery(page.PageSize*page.PageNumber), createSizeQuery(page.PageSize)) +} + +func getSessionsFromSessionIdsQuery(sessionIds []string) string { + shouldQuery := createShouldQuery(createTermSubQuery("base_attributes.session_id", sessionIds...)) 
+	boolQuery := createBoolQuery(shouldQuery)
+	searchQuery := createSearchQuery(boolQuery)
+	// Chronological order so callers can replay events in sequence.
+	sortQuery := createSortQuery("base_attributes.client_ts", "asc", "strict_date_optional_time_nanos")
+	return createEsQuery(searchQuery, sortQuery, createSizeQuery(utils.SessionUpperLimit))
+}
+
+// getSessionAndSessionDurationFromSessionIdQuery builds an ES query that
+// matches the given session ids and, bucketed per session_id, aggregates the
+// min session timestamp and max event-end/client timestamps (the inputs used
+// to compute each session's duration).
+func getSessionAndSessionDurationFromSessionIdQuery(sessionIds []string, page *es.Page, sortBy string) string {
+	shouldQuery := createShouldQuery(createTermSubQuery("base_attributes.session_id", sessionIds...))
+	boolQuery := createBoolQuery(shouldQuery)
+	searchQuery := createSearchQuery(boolQuery)
+	sortQuery := createSortQuery(sortBy, string(page.SortDirection), "strict_date_optional_time_nanos")
+	// Bucket count is capped at the page size; each bucket carries the three
+	// timestamp sub-aggregations below.
+	termsAggregation := createTermsAggregationQuery("base_attributes.session_id", int(page.PageSize))
+	minTimeStampAggregation := buildAggregationQuery("min_timestamp", createMinAggregationQuery("base_attributes.session_time_stamp"))
+	maxTimeStampAggregation := buildAggregationQuery("max_timestamp", createMaxAggregationQuery("base_attributes.event_end_time_stamp"))
+	maxClientTimestampAggregation := buildAggregationQuery("max_client_timestamp", createMaxAggregationQuery("base_attributes.client_ts"))
+	innerAggregationQuery := createAggregationQuery(minTimeStampAggregation, maxTimeStampAggregation, maxClientTimestampAggregation)
+	aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation, innerAggregationQuery))
+	return createEsQuery(searchQuery, sortQuery, aggregationQuery, createSizeQuery(utils.SessionUpperLimit))
+}
+
+// getAggsOnSessionIdsForDeviceAttributes builds an ES query that, for sessions
+// recorded at the given snapshot rate within [startTimestamp, endTimestamp],
+// aggregates battery/memory extremes and timestamp bounds per session_id,
+// filtered to documents whose end battery reading lies in [1, 101).
+func getAggsOnSessionIdsForDeviceAttributes(startTimestamp int64, endTimestamp int64, snapshotPerSecond int64, page *es.Page) string {
+	shouldQuery := createShouldQuery(createTermSubQueryForInt("base_attributes.snapshot_per_second", snapshotPerSecond))
+	rangeQuery := createRangeQuery("base_attributes.client_ts", startTimestamp, endTimestamp)
+	boolQuery := createBoolQuery(createMustQuery(createBoolQuery(shouldQuery), rangeQuery))
+
	searchQuery := createSearchQuery(boolQuery)
+	termsAggregation := createTermsAggregationQueryWithoutSize("base_attributes.session_id")
+	maxBeginningBatteryAggregation := buildAggregationQuery("max_beginning_battery", createMaxAggregationQuery("session_upload_event_attributes.beginning_device_attributes.battery"))
+	minEndBatteryAggregation := buildAggregationQuery("min_end_battery", createMinAggregationQuery("session_upload_event_attributes.end_device_attributes.battery"))
+	// NOTE(review): the variable says "Beginning" but the aggregation is named
+	// "min_end_memory" and reads end_device_attributes.memory — the name looks
+	// misleading; confirm intent before renaming.
+	minBeginningMemoryAggregation := buildAggregationQuery("min_end_memory", createMinAggregationQuery("session_upload_event_attributes.end_device_attributes.memory"))
+	maxEndMemoryAggregation := buildAggregationQuery("max_end_memory", createMaxAggregationQuery("session_upload_event_attributes.end_device_attributes.memory"))
+	maxEventEndTimestampAggregation := buildAggregationQuery("max_event_end_timestamp", createMaxAggregationQuery("base_attributes.event_end_time_stamp"))
+	minClientTimestampAggregation := buildAggregationQuery("min_client_timestamp", createMinAggregationQuery("base_attributes.client_ts"))
+	innerAggregationQuery := createAggregationQuery(maxBeginningBatteryAggregation, minEndBatteryAggregation, minBeginningMemoryAggregation, maxEndMemoryAggregation, maxEventEndTimestampAggregation, minClientTimestampAggregation)
+	// Outer filter keeps only buckets whose end battery reading is plausible
+	// (1 inclusive to 101 exclusive).
+	rangeQueryOnEndBattery := createRangeQuery("session_upload_event_attributes.end_device_attributes.battery", 1, 101)
+	filterQuery := createCustomQuery("filter", rangeQueryOnEndBattery)
+	aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation, innerAggregationQuery))
+	finalAggregationQuery := createAggregationQuery(buildAggregationQuery("filter", filterQuery, aggregationQuery))
+	// size 0: only aggregation buckets are needed, not the raw hits.
+	return createEsQuery(searchQuery, finalAggregationQuery, createFromQuery(page.PageSize*page.PageNumber), createSizeQuery(0))
+
+}
+
+// getUpdateSessionErrorEventsWithSessionIdQuery builds an update-by-query body
+// that flips base_attributes.has_errors on documents of the given sessions
+// that do not already carry the target value.
+func getUpdateSessionErrorEventsWithSessionIdQuery(sessionIds []string, hasErrors bool) string {
+	shouldQuery := 
createShouldQuery(createTermSubQuery("base_attributes.session_id", sessionIds...)) + mustNotQuery := createMustNotQuery(createTermSubQueryBool("base_attributes.has_errors", hasErrors)) + boolQuery := createBoolQuery(shouldQuery, mustNotQuery) + searchQuery := createSearchQuery(boolQuery) + scriptQuery := createScriptQuery("ctx._source.base_attributes.has_errors = " + fmt.Sprintf("%t", hasErrors)) + return createEsQuery(searchQuery, scriptQuery) +} + +func getSessionListFromSessionIdsQuery(sessionIds []string, page *es.Page, sortBy string) string { + shouldQuery := createShouldQuery(createTermSubQuery("base_attributes.session_id", sessionIds...)) + boolQuery := createBoolQuery(shouldQuery) + searchQuery := createSearchQuery(boolQuery) + sortQuery := createSortQuery(sortBy, string(page.SortDirection), "strict_date_optional_time_nanos") + collapseQuery := createCollapseQuery("base_attributes.session_id") + return createEsQuery(searchQuery, sortQuery, collapseQuery, createFromQuery(page.PageSize*page.PageNumber), createSizeQuery(page.PageSize)) +} + +func getPaginatedSessionsFromLabelsQuery(appVersion, deviceIds []string, startTimestamp, endTimestamp int64, page *es.Page, appOs string) string { + multipleShouldQuery := []string{ + createBoolShouldQuery(createTermSubQuery("base_attributes.device_id", deviceIds...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_code", appVersion...))} + appOsMatchQuery := createMatchQuery("base_attributes.app_os", appOs) + shouldQuery := strings.Join(multipleShouldQuery, ",") + rangeQuery := createRangeQuery("created_at", startTimestamp, endTimestamp) + query := createEsQuery( + createSearchQuery(createBoolQuery(createMustQuery(shouldQuery, rangeQuery, appOsMatchQuery))), + createSortQuery("created_at", string(page.SortDirection), utils.EMPTY), + createCollapseQuery("base_attributes.session_id"), + createFromQuery(page.PageSize*page.PageNumber), + createSizeQuery(page.PageSize), + ) + return query +} + +func 
getPaginatedSessionsFromLabelsQueryWithoutTimeRange(appVersion []string, deviceIds []string, page *es.Page, appOs string) string { + multipleShouldQuery := []string{ + createBoolShouldQuery(createTermSubQuery("base_attributes.device_id", deviceIds...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.app_version_code", appVersion...))} + appOsMatchQuery := createMatchQuery("base_attributes.app_os", appOs) + shouldQuery := strings.Join(multipleShouldQuery, ",") + query := createEsQuery( + createSearchQuery(createBoolQuery(createMustQuery(shouldQuery, appOsMatchQuery))), + createSortQuery("created_at", string(page.SortDirection), utils.EMPTY), + createCollapseQuery("base_attributes.session_id"), + createFromQuery(page.PageSize*page.PageNumber), + createSizeQuery(page.PageSize), + ) + return query +} + +func getUniqueValuesQueryFromSession(fieldName string) string { + termsAggregation := createTermsAggregationQuery(fieldName, utils.SessionUpperLimit) + aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation)) + return createEsQuery(aggregationQuery, createSizeQuery(0), `"track_total_hits": false`) + +} diff --git a/alfred/repository/shedlockRepository.go b/alfred/repository/shedlockRepository.go new file mode 100644 index 0000000..8a75bc9 --- /dev/null +++ b/alfred/repository/shedlockRepository.go @@ -0,0 +1,91 @@ +package repository + +import ( + "alfred/config" + "alfred/internal/metrics" + "alfred/model/common" + "alfred/pkg/es7" + "alfred/pkg/log" + "context" + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "net/http" + "strings" +) + +type ShedlockRepository interface { + InsertShedlockForCronWithOptimisticControl(shedLock *common.ShedLock, primaryTerm *int, seqNo *int) (int, error) + InsertShedlockForCron(shedLock *common.ShedLock) error + GetShedLockStatus(cronName string) (*esapi.Response, error) +} + +type ShedlockRepositoryImpl struct { + esClient es7.ElasticSearchClient 
+}
+
+// NewShedlockRepository wires an ES client into a ShedlockRepository.
+func NewShedlockRepository(esClient es7.ElasticSearchClient) ShedlockRepository {
+	return &ShedlockRepositoryImpl{
+		esClient: esClient,
+	}
+}
+
+// InsertShedlockForCron upserts the shed-lock document for a cron (document id
+// is the lock name) with an immediate refresh.
+func (sl *ShedlockRepositoryImpl) InsertShedlockForCron(shedLock *common.ShedLock) error {
+
+	// BUGFIX: the marshal error was previously discarded with `_`; a failed
+	// marshal would have sent an empty body to ES.
+	body, err := json.Marshal(shedLock)
+	if err != nil {
+		metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex).Inc()
+		log.Error("marshalling shed-lock failed", zap.Error(err))
+		return err
+	}
+
+	req := esapi.IndexRequest{
+		Index:      config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex,
+		DocumentID: shedLock.Name,
+		Body:       strings.NewReader(string(body)),
+		Refresh:    "true",
+	}
+
+	indexResponse, err := req.Do(context.Background(), sl.esClient.GetESClient())
+	if err != nil {
+		metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex).Inc()
+		log.Error("Insert For Shed-lock Failed", zap.Error(err))
+		return err
+	}
+	defer indexResponse.Body.Close()
+	// NOTE(review): indexResponse.IsError() is not checked here, so an ES
+	// error status still counts as success — confirm whether that is intended.
+	metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex).Inc()
+
+	return nil
+}
+
+// InsertShedlockForCronWithOptimisticControl upserts the shed-lock document
+// guarded by if_primary_term/if_seq_no so concurrent lockers race safely; the
+// returned status code lets the caller detect version conflicts.
+func (sl *ShedlockRepositoryImpl) InsertShedlockForCronWithOptimisticControl(shedLock *common.ShedLock, primaryTerm *int, seqNo *int) (int, error) {
+
+	// BUGFIX: the marshal error was previously discarded with `_`.
+	body, err := json.Marshal(shedLock)
+	if err != nil {
+		log.Error("marshalling shed-lock failed", zap.Error(err))
+		return http.StatusBadRequest, err
+	}
+
+	req := esapi.IndexRequest{
+		Index:         config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex,
+		DocumentID:    shedLock.Name,
+		Body:          strings.NewReader(string(body)),
+		Refresh:       "true",
+		IfPrimaryTerm: primaryTerm,
+		IfSeqNo:       seqNo,
+	}
+
+	indexResponse, err := req.Do(context.Background(), sl.esClient.GetESClient())
+	if err != nil {
+		log.Error("Insert For Shed-lock Failed With Optimistic Concurrency control", zap.Error(err))
+		return http.StatusBadRequest, err
+	}
+
+	defer indexResponse.Body.Close()
+
+	return indexResponse.StatusCode, nil
+}
+
+// GetShedLockStatus fetches the shed-lock document for the given cron by id.
+func (sl *ShedlockRepositoryImpl) GetShedLockStatus(cronName string) (*esapi.Response, error) {
+
+	response, err := sl.esClient.FetchESDataWithDocId(config.GetCoreConfig().ElasticSearchConfig.ShedLockForCronIndex, cronName)
+
+	if err != nil {
+		log.Error("Fetch 
Shed-lock Status Failed", zap.Error(err)) + return nil, err + } + + return response, nil + +} diff --git a/alfred/repository/videoFragmentsRepository.go b/alfred/repository/videoFragmentsRepository.go new file mode 100644 index 0000000..127cdbc --- /dev/null +++ b/alfred/repository/videoFragmentsRepository.go @@ -0,0 +1,155 @@ +package repository + +import ( + "alfred/internal/metrics" + "alfred/model/core" + "alfred/pkg/es7" + "alfred/pkg/log" + "context" + "errors" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "net/http" + "strings" +) + +type VideoGenerationStatusRepository interface { + CreateVideoGenerationStatus(videoGenerationStatus core.VideoFragmentStatusModel, message string, videoGenerationStatusIndex string) error + FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex string) (*esapi.Response, error) + UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, eventId, status, processedZipName string, currentFragment int64) (int, error) + UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status string, videoGeneratedTillNow int) (int, error) + UpdateVideoDimensions(sessionId, videoGenerationStatusIndex string, width, height int) error +} + +type VideoGenerationStatusRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewVideoGenerationStatusRepository(esClient es7.ElasticSearchClient) VideoGenerationStatusRepository { + return &VideoGenerationStatusRepositoryImpl{ + esClient: esClient, + } +} + +func (r *VideoGenerationStatusRepositoryImpl) CreateVideoGenerationStatus(fragmentData core.VideoFragmentStatusModel, message string, videoGenerationStatusIndex string) error { + req := esapi.IndexRequest{ + Index: videoGenerationStatusIndex, + DocumentID: fragmentData.SessionId, + Body: strings.NewReader(message), + } + + response, err := req.Do(context.Background(), r.esClient.GetESClient()) + if err != nil { + 
metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(videoGenerationStatusIndex).Inc()
+		log.Error("video generation status insertion document in ES failed", zap.String("response", fmt.Sprintf("%v", response)), zap.Error(err))
+		return err
+	}
+	defer response.Body.Close()
+	metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(videoGenerationStatusIndex).Inc()
+
+	return nil
+}
+
+// FetchVideoGenerationStatus returns the raw ES response holding the video
+// generation status document(s) for the given session.
+func (r *VideoGenerationStatusRepositoryImpl) FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex string) (*esapi.Response, error) {
+	content := getVideoGenerationStatusSessQuery(sessionId)
+	result, err := r.esClient.FetchESData(videoGenerationStatusIndex, content)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// getVideoGenerationStatusSessQuery builds a term query on session_id.
+func getVideoGenerationStatusSessQuery(sessionId string) string {
+	termQuery := createTermSubQuery("session_id", sessionId)
+	mustQuery := createMustQuery(termQuery)
+	return createEsQuery(createSearchQuery(createBoolQuery(mustQuery)), createSizeQuery(100))
+}
+
+// UpdateFragmentVideoGenerationStatus updates one fragment's processing status
+// and processed zip name (and, when currentFragment > 0, the completed-fragment
+// counter) via update-by-query.
+func (r *VideoGenerationStatusRepositoryImpl) UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, eventId, status, processedZipName string, currentFragment int64) (int, error) {
+	content, err := getUpdateFragmentVideoGenerationStatusQuery(sessionId, eventId, status, processedZipName, currentFragment)
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	response, err := r.esClient.UpdateESData([]string{videoGenerationStatusIndex}, content)
+	// BUGFIX: Body.Close was previously deferred BEFORE the error check; a nil
+	// response on error would panic with a nil pointer dereference.
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	defer response.Body.Close()
+
+	if response.IsError() {
+		return response.StatusCode, errors.New("updation in ES failed")
+
+	}
+	return http.StatusOK, nil
+}
+
+// UpdateVideoGenerationStatus updates the session's overall video generation
+// status (and video_generated_till_now when non-zero) via update-by-query.
+func (r *VideoGenerationStatusRepositoryImpl) UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status string, videoGeneratedTillNow int) (int, error) {
+	content, err := getUpdateVideoGenerationStatusQuery(sessionId, status, videoGeneratedTillNow)
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	response, err := r.esClient.UpdateESData([]string{videoGenerationStatusIndex}, content)
+	// BUGFIX: same nil-response hazard — the old code both deferred the close
+	// and returned response.StatusCode before confirming response is non-nil.
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	defer response.Body.Close()
+
+	if response.IsError() {
+		return response.StatusCode, errors.New("ES Updation failed while updating video generation status")
+	}
+	return response.StatusCode, nil
+}
+
+// UpdateVideoDimensions persists the rendered video's width and height onto
+// the session's status document.
+func (r *VideoGenerationStatusRepositoryImpl) UpdateVideoDimensions(sessionId, videoGenerationStatusIndex string, width, height int) error {
+	content, err := getUpdateVideoDimensionQuery(sessionId, width, height)
+	if err != nil {
+		log.Error("Error updating video dimensions", zap.Error(err))
+		return err
+	}
+	response, err := r.esClient.UpdateESData([]string{videoGenerationStatusIndex}, content)
+	// BUGFIX: check err before deferring Body.Close on a possibly-nil response.
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+	return nil
+}
+
+// getUpdateFragmentVideoGenerationStatusQuery builds the painless script that
+// flips the matching fragment's status/zip name; the completed-fragment counter
+// is written only when currentFragment is non-zero.
+func getUpdateFragmentVideoGenerationStatusQuery(sessionId, eventId, status, processedZipName string, currentFragment int64) (string, error) {
+	script := fmt.Sprintf(`for (int i = 0; i < ctx._source.video_fragment_statuses.length; i++) {if (ctx._source.video_fragment_statuses[i].event_id == '%s') {ctx._source.video_fragment_statuses[i].zip_processing_status= '%s'; ctx._source.video_fragment_statuses[i].processed_zip_name = '%s';}} ctx._source.fragments_completed_till_now = %d;`, eventId, status, processedZipName, currentFragment)
+
+	if currentFragment == 0 {
+		script = fmt.Sprintf(`for (int i = 0; i < ctx._source.video_fragment_statuses.length; i++) {if (ctx._source.video_fragment_statuses[i].event_id == '%s') {ctx._source.video_fragment_statuses[i].zip_processing_status= '%s'; ctx._source.video_fragment_statuses[i].processed_zip_name = '%s';}}`, eventId, status, processedZipName)
+	}
+
+	boolQuery := createBoolQuery(createMustQuery(
+		createTermSubQuery("session_id", sessionId)))
+
+	searchQuery := createSearchQuery(boolQuery)
+	esQuery := createEsQuery(searchQuery, 
createScriptQuery(string(script))) + return esQuery, nil +} + +func getUpdateVideoGenerationStatusQuery(sessionId, status string, videoGeneratedTillNow int) (string, error) { + script := fmt.Sprintf(`ctx._source.video_generation_status = '%s';ctx._source.video_generated_till_now = %d;`, status, videoGeneratedTillNow) + if videoGeneratedTillNow == 0 { + script = fmt.Sprintf(`ctx._source.video_generation_status = '%s';`, status) + } + boolQuery := createBoolQuery(createMustQuery( + createTermSubQuery("session_id", sessionId))) + + searchQuery := createSearchQuery(boolQuery) + esQuery := createEsQuery(searchQuery, createScriptQuery(script)) + return esQuery, nil +} + +func getUpdateVideoDimensionQuery(sessionId string, width, height int) (string, error) { + script := fmt.Sprintf(`ctx._source.video_width = %d;ctx._source.video_height = %d;`, width, height) + + boolQuery := createBoolQuery(createMustQuery( + createTermSubQuery("session_id", sessionId))) + + searchQuery := createSearchQuery(boolQuery) + esQuery := createEsQuery(searchQuery, createScriptQuery(script)) + return esQuery, nil +} diff --git a/alfred/repository/webSessionRepository.go b/alfred/repository/webSessionRepository.go new file mode 100644 index 0000000..9fd299c --- /dev/null +++ b/alfred/repository/webSessionRepository.go @@ -0,0 +1,166 @@ +package repository + +import ( + "alfred/api/request" + "alfred/config" + "alfred/internal/metrics" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/es7" + "alfred/pkg/log" + "alfred/utils" + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/elastic/go-elasticsearch/v8/esapi" + "go.uber.org/zap" + "strings" +) + +type WebSessionsRepository interface { + UploadWebSession(sessionUploadRequest ingester.WebSessionUploadRequest, webSessionUploadIndex string) error + FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex string) (*esapi.Response, error) + FetchAllWebSession(filters request.WebSessionFilters, page *es.Page, 
webSessionUploadIndexList []string) (*esapi.Response, error) + FetchWebSessionsWithDurationResponse(webSessionIds []string, webSessionUploadIndex string, filters request.WebSessionFilters, page *es.Page) (*esapi.Response, error) +} + +type WebSessionsRepositoryImpl struct { + esClient es7.ElasticSearchClient +} + +func NewWebSessionsRepository(esClient es7.ElasticSearchClient) WebSessionsRepository { + return &WebSessionsRepositoryImpl{ + esClient: esClient, + } +} + +func (r *WebSessionsRepositoryImpl) UploadWebSession(sessionUploadRequest ingester.WebSessionUploadRequest, webSessionUploadIndex string) error { + year, month, day := utils.GetCurrentDate() + + index := webSessionUploadIndex + fmt.Sprintf("-%d-%d-%d", year, month, day) + message, err := json.Marshal(sessionUploadRequest) + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(webSessionUploadIndex).Inc() + log.Error("error ingesting web session", zap.Error(err)) + return err + } + documentId := sessionUploadRequest.SessionAttributes.EventId + if sessionUploadRequest.BaseAttributes.Version >= config.GetCollectorConfig().ElasticSearchConfig.MinWebVersionSupportingSingleDoc { + documentId = sessionUploadRequest.BaseAttributes.SessionId + } + req := esapi.IndexRequest{ + Index: index, + DocumentID: documentId, + Body: bytes.NewReader(message), + } + + indexResponse, err := req.Do(context.Background(), r.esClient.GetESClient()) + if err != nil { + metrics.ElasticSearchIngestionFailureCounter.WithLabelValues(webSessionUploadIndex).Inc() + log.Error("web session ingestion failed", zap.String("response", fmt.Sprintf("%v", indexResponse)), zap.Error(err)) + return err + } + defer indexResponse.Body.Close() + + metrics.ElasticSearchIngestionSuccessCounter.WithLabelValues(webSessionUploadIndex).Inc() + + return nil +} + +func (r *WebSessionsRepositoryImpl) FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex string) (*esapi.Response, error) { + content := 
getWebSessionFromSessionIdQuery(sessionId) + result, err := r.esClient.FetchESData(webSessionUploadIndex, content) + if err != nil { + return nil, err + } + return result, err +} + +func (r *WebSessionsRepositoryImpl) FetchAllWebSession(filters request.WebSessionFilters, page *es.Page, webSessionUploadIndexList []string) (*esapi.Response, error) { + content := getAllPaginatedWebSessionsQuery(filters, page) + result, err := r.esClient.FetchESDataMultipleIndex(webSessionUploadIndexList, content) + if err != nil { + return nil, err + } + + return result, nil +} + +func (r *WebSessionsRepositoryImpl) FetchWebSessionsWithDurationResponse(webSessionIds []string, webSessionUploadIndex string, filters request.WebSessionFilters, page *es.Page) (*esapi.Response, error) { + content := getWebSessionWithDurationQuery(webSessionIds, filters, page) + result, err := r.esClient.FetchESData(webSessionUploadIndex, content) + if err != nil { + return nil, err + } + return result, nil +} + +func getWebSessionFromSessionIdQuery(sessionId string) string { + termFilter := createTermSubQuery("base_attributes.session_id", sessionId) + var sortQueryFields []string + sortQueryFields = append(sortQueryFields, createSortQueryFields("base_attributes.client_timestamp", "asc", "strict_date_optional_time_nanos")) + sortQueryFields = append(sortQueryFields, createSortQueryFields("session_attribute.event_id", "asc", "")) + query := createEsQuery( + createSearchQuery(createBoolQuery(createShouldQuery(termFilter))), + createMultiSortQuery(sortQueryFields...), + createSizeQuery(utils.EsUpperLimit), + ) + return query +} + +func getAllPaginatedWebSessionsQuery(filters request.WebSessionFilters, page *es.Page) string { + projectName, agentId, ticketId, sessionId, deviceId, emailId, phoneNumber := getFilterList(filters) + multipleShouldQuery := []string{createBoolShouldQuery(createTermSubQuery("base_attributes.project_name", projectName...)), + 
createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.agentId", agentId...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.ticketId", ticketId...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.emailId", emailId...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.metadata.phoneNumber", phoneNumber...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.session_id", sessionId...)), + createBoolShouldQuery(createTermSubQuery("base_attributes.device_id", deviceId...))} + shouldQuery := strings.Join(multipleShouldQuery, ",") + rangeQuery := createRangeQuery("base_attributes.client_timestamp", filters.StartTimestamp, filters.EndTimestamp) + + query := createEsQuery( + createSearchQuery(createBoolQuery(createMustQuery(shouldQuery, rangeQuery))), + createSortQuery(filters.SortBy, string(page.SortDirection), "strict_date_optional_time_nanos"), + createCollapseQuery("base_attributes.session_id"), + createFromQuery(page.PageSize*page.PageNumber), + createSizeQuery(page.PageSize), + ) + return query +} + +func getWebSessionWithDurationQuery(webSessionIds []string, filters request.WebSessionFilters, page *es.Page) string { + boolshouldQuery := createBoolShouldQuery(createTermSubQuery("base_attributes.session_id", webSessionIds...)) + sortQuery := createSortQuery(filters.SortBy, string(page.SortDirection), "strict_date_optional_time_nanos") + collapseQuery := createCollapseQuery("base_attributes.session_id") + minTimestampAggregationQuery := buildAggregationQuery("min_start_timestamp", createMinAggregationQuery("session_attribute.start_timestamp")) + maxTimestampAggregationQuery := buildAggregationQuery("max_end_timestamp", createMaxAggregationQuery("session_attribute.end_timestamp")) + termsAggregation := createTermsAggregationQuery("base_attributes.session_id", int(page.PageSize)) + innerAggregationQuery := createAggregationQuery(minTimestampAggregationQuery, maxTimestampAggregationQuery) + 
aggregationQuery := createAggregationQuery(buildAggregationQuery("buckets", termsAggregation, innerAggregationQuery))
+	return createEsQuery(createSearchQuery(boolshouldQuery), sortQuery, collapseQuery, aggregationQuery, createSizeQuery(utils.EsUpperLimit))
+}
+
+// getFilterList splits the comma-separated filter fields of a web-session
+// search request into slices; an empty filter yields a nil slice so the
+// corresponding should-query matches everything for that field.
+func getFilterList(filters request.WebSessionFilters) (projectName, agentId, ticketId, sessionId, deviceId, emailId, phoneNumber []string) {
+	if filters.ProjectName != utils.EMPTY {
+		projectName = strings.Split(filters.ProjectName, utils.COMMA)
+	}
+	if filters.AgentId != utils.EMPTY {
+		agentId = strings.Split(filters.AgentId, utils.COMMA)
+	}
+	if filters.TicketId != utils.EMPTY {
+		ticketId = strings.Split(filters.TicketId, utils.COMMA)
+	}
+	if filters.SessionId != utils.EMPTY {
+		sessionId = strings.Split(filters.SessionId, utils.COMMA)
+	}
+	if len(filters.DeviceId) != 0 {
+		deviceId = filters.DeviceId
+	}
+	if filters.EmailId != utils.EMPTY {
+		emailId = strings.Split(filters.EmailId, utils.COMMA)
+	}
+	// BUGFIX: phoneNumber was returned but never populated, so the
+	// phoneNumber should-query built in getAllPaginatedWebSessionsQuery could
+	// never match anything. NOTE(review): assumes WebSessionFilters carries a
+	// PhoneNumber field mirroring EmailId — confirm against the request model.
+	if filters.PhoneNumber != utils.EMPTY {
+		phoneNumber = strings.Split(filters.PhoneNumber, utils.COMMA)
+	}
+	return projectName, agentId, ticketId, sessionId, deviceId, emailId, phoneNumber
+}
diff --git a/alfred/repositoryAccessLayer/appFragmentsAccessLayer.go b/alfred/repositoryAccessLayer/appFragmentsAccessLayer.go
new file mode 100644
index 0000000..3eae560
--- /dev/null
+++ b/alfred/repositoryAccessLayer/appFragmentsAccessLayer.go
@@ -0,0 +1,34 @@
+package repositoryAccessLayer
+
+import (
+	"alfred/mapper"
+	"alfred/model/ingester"
+	"alfred/repository"
+)
+
+type AppFragmentsAccessLayer interface {
+	CreateFragment(fragmentData ingester.FragmentModel, message string, fragmentIngestionIndex string) error
+	FetchUniqueFragments(key string, fragmentIngestionIndex string) ([]string, error)
+}
+
+type AppFragmentsAccessLayerImpl struct {
+	appFragmentsRepository repository.AppFragmentsRepository
+}
+
+func NewAppFragmentAccessLayer(appFragmentsRepository repository.AppFragmentsRepository) AppFragmentsAccessLayer {
+	return &AppFragmentsAccessLayerImpl{
+		appFragmentsRepository: appFragmentsRepository,
} +} + +func (ral *AppFragmentsAccessLayerImpl) CreateFragment(fragmentData ingester.FragmentModel, message string, fragmentIngestionIndex string) error { + return ral.appFragmentsRepository.CreateFragment(fragmentData, message, fragmentIngestionIndex) +} + +func (ral *AppFragmentsAccessLayerImpl) FetchUniqueFragments(key string, fragmentIngestionIndex string) ([]string, error) { + result, err := ral.appFragmentsRepository.FetchUniqueFragments(key, fragmentIngestionIndex) + if err != nil { + return nil, err + } + return mapper.MapESResponseToGetUniqueValues(result, key) +} diff --git a/alfred/repositoryAccessLayer/cruiseControlAccessLayer.go b/alfred/repositoryAccessLayer/cruiseControlAccessLayer.go new file mode 100644 index 0000000..e7a9af7 --- /dev/null +++ b/alfred/repositoryAccessLayer/cruiseControlAccessLayer.go @@ -0,0 +1,54 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/core/cruise" + "alfred/model/es" + "alfred/repository" + "alfred/utils" +) + +type CruiseControlAccessLayer interface { + CreateCruiseControlConfig(cruiseControl *cruise.ControlConfig, cruiseControlIndex string) error + FetchPreviousAppVersion(cruiseControlIndex, appOs string) (string, error) + FetchCruiseControlConfig(appVersion, appOs, cruiseControlIndex string) (*es.ESResponse, error) + FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs string) ([]string, error) +} + +type CruiseControlAccessLayerImpl struct { + cruiseControlRepository repository.CruiseControlRepository +} + +func NewCruiseControlAccessLayer(cruiseControlRepository repository.CruiseControlRepository) CruiseControlAccessLayer { + return &CruiseControlAccessLayerImpl{ + cruiseControlRepository: cruiseControlRepository, + } +} + +func (ral *CruiseControlAccessLayerImpl) CreateCruiseControlConfig(cruiseControl *cruise.ControlConfig, cruiseControlIndex string) error { + return ral.cruiseControlRepository.CreateCruiseControlConfig(cruiseControl, cruiseControlIndex) +} + +func (ral 
*CruiseControlAccessLayerImpl) FetchPreviousAppVersion(cruiseControlIndex, appOs string) (string, error) { + result, err := ral.cruiseControlRepository.FetchPreviousAppVersion(cruiseControlIndex, appOs) + if err != nil { + return utils.EMPTY, err + } + return mapper.MapCruiseResponseToString(result) +} + +func (ral *CruiseControlAccessLayerImpl) FetchCruiseControlConfig(appVersion, appOs string, cruiseControlIndex string) (*es.ESResponse, error) { + result, err := ral.cruiseControlRepository.FetchCruiseControlConfig(appVersion, appOs, cruiseControlIndex) + if err != nil { + return nil, err + } + return mapper.MapESApiResponseToESResponse(result) +} + +func (ral *CruiseControlAccessLayerImpl) FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs string) ([]string, error) { + result, err := ral.cruiseControlRepository.FetchAllCruiseControlAppVersions(cruiseControlIndex, appOs) + if err != nil { + return nil, err + } + return mapper.MapCruiseResponseToList(result) +} diff --git a/alfred/repositoryAccessLayer/deviceMetricsAccessLayer.go b/alfred/repositoryAccessLayer/deviceMetricsAccessLayer.go new file mode 100644 index 0000000..1186855 --- /dev/null +++ b/alfred/repositoryAccessLayer/deviceMetricsAccessLayer.go @@ -0,0 +1,35 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/core" + "alfred/repository" +) + +type DeviceMetricsAccessLayer interface { + InsertDeviceMetrics(deviceMetrics *core.DeviceMetricsModel, index string) error + GetDeviceMetrics(startTime int64, endTime int64, snapshotPerSecond int64, client string, index []string) ([]core.DeviceMetricsEsResponse, error) +} + +type DeviceMetricsAccessLayerImpl struct { + deviceMetricsRepository repository.DeviceMetricsRepository +} + +func NewDeviceMetricskAccessLayer(deviceMetricsRepository repository.DeviceMetricsRepository) DeviceMetricsAccessLayer { + return &DeviceMetricsAccessLayerImpl{ + deviceMetricsRepository: deviceMetricsRepository, + } +} + +func (dm 
*DeviceMetricsAccessLayerImpl) InsertDeviceMetrics(deviceMetrics *core.DeviceMetricsModel, index string) error { + err := dm.deviceMetricsRepository.InsertDeviceMetrics(deviceMetrics, index) + return err +} + +func (dm *DeviceMetricsAccessLayerImpl) GetDeviceMetrics(startTime int64, endTime int64, snapshotPerSecond int64, client string, index []string) ([]core.DeviceMetricsEsResponse, error) { + response, err := dm.deviceMetricsRepository.GetDeviceMetrics(startTime, endTime, snapshotPerSecond, client, index) + if err != nil { + return nil, err + } + return mapper.MapEsApiResponseToDeviceMetricsResponse(response) +} diff --git a/alfred/repositoryAccessLayer/errorEventsAccessLayer.go b/alfred/repositoryAccessLayer/errorEventsAccessLayer.go new file mode 100644 index 0000000..b6cf30d --- /dev/null +++ b/alfred/repositoryAccessLayer/errorEventsAccessLayer.go @@ -0,0 +1,79 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/es" + "alfred/repository" + "errors" +) + +type ErrorEventsAccessLayer interface { + UploadErrorEvents(message string, errorsIngestionIndex string) (int, error) + FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList []string, startTime, endTime int64, sessionErrorEventsFilter []string) ([]string, error) + FetchErrorEvents(sessionId string, errorEventsUploadIndex string) ([]es.ErrorEventsResponse, error) + GetErrorEventsLastCronTimestamp(clientName string, errorEventsTimeIndex string) (int64, error) + UpdateErrorEventsLastCronTimestamp(timestamp int64, clientName string, errorEventsTimeIndex string) error + FetchSessionErrorEventsWithKeyValue(keyValueMap map[string][]string, errorEventsIndex string) ([]es.ErrorEventsResponse, error) + UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList []string) error +} +type ErrorEventsAccessLayerImpl struct { + errorEventsRepository repository.ErrorEventsRepository +} + +func NewErrorEventsAccessLayer(errorEventsRepository 
repository.ErrorEventsRepository) ErrorEventsAccessLayer { + return &ErrorEventsAccessLayerImpl{ + errorEventsRepository: errorEventsRepository, + } +} + +func (ral *ErrorEventsAccessLayerImpl) UploadErrorEvents(message string, errorsIngestionIndex string) (int, error) { + return ral.errorEventsRepository.UploadErrorEvents(message, errorsIngestionIndex) +} + +func (ral *ErrorEventsAccessLayerImpl) FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList []string, startTime, endTime int64, sessionErrorEventsFilter []string) ([]string, error) { + result, err := ral.errorEventsRepository.FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList, startTime, endTime, sessionErrorEventsFilter) + if result.IsError() { + defer result.Body.Close() + err = errors.New("error while fetching session error events session id with time range") + } + if err != nil { + return nil, err + } + return mapper.MapEsResponseAndGetSessionIds(result) +} + +func (ral *ErrorEventsAccessLayerImpl) FetchErrorEvents(sessionId string, errorEventsUploadIndex string) ([]es.ErrorEventsResponse, error) { + result, err := ral.errorEventsRepository.FetchErrorEvents(sessionId, errorEventsUploadIndex) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToErrorEventsResponse(result) +} + +func (ral *ErrorEventsAccessLayerImpl) FetchSessionErrorEventsWithKeyValue(keyValueMap map[string][]string, errorEventsIndex string) ([]es.ErrorEventsResponse, error) { + result, err := ral.errorEventsRepository.FetchSessionErrorEventsWithKeyValue(keyValueMap, errorEventsIndex) + if result.IsError() { + defer result.Body.Close() + err = errors.New("error while fetching session error events with key value") + } + if err != nil { + return nil, err + } + return mapper.MapEsResponseToErrorEventsResponse(result) +} + +func (ral *ErrorEventsAccessLayerImpl) GetErrorEventsLastCronTimestamp(clientName string, errorEventsTimeIndex string) (int64, error) { + result, err := 
ral.errorEventsRepository.GetErrorEventsLastCronTimestamp(errorEventsTimeIndex) + if err != nil { + return 0, err + } + return mapper.MapEsResponseToTimeStamp(clientName, result) +} + +func (ral *ErrorEventsAccessLayerImpl) UpdateErrorEventsLastCronTimestamp(timestamp int64, clientName string, errorEventsTimeIndex string) error { + return ral.errorEventsRepository.UpdateErrorEventsLastCronTimestamp(timestamp, clientName, errorEventsTimeIndex) +} + +func (ral *ErrorEventsAccessLayerImpl) UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList []string) error { + return ral.errorEventsRepository.UpdateErrorEventsInActiveBulk(errorEventsdocIdList, errorEventsindexList) +} diff --git a/alfred/repositoryAccessLayer/eventsAccessLayer.go b/alfred/repositoryAccessLayer/eventsAccessLayer.go new file mode 100644 index 0000000..1fdc52f --- /dev/null +++ b/alfred/repositoryAccessLayer/eventsAccessLayer.go @@ -0,0 +1,95 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/es" + "alfred/pkg/log" + "alfred/repository" + "go.uber.org/zap" +) + +type EventsAccessLayer interface { + CreateEventIngester(message string, eventIngestionIndex string) (int, error) + FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion []string, deviceIds []string, snapshotPerSecond []int64, startTime, endTime int64, page *es.Page, eventIngestionIndexList []string, phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string, appOs string) ([]es.EventResponse, error) + FetchEventsFromSessionId(sessionId string, page *es.Page, eventIngestionIndex string, withAggregation bool) ([]es.EventResponse, *int64, error) + FetchUniqueKeys(key string, eventIngestionIndex string) ([]string, error) + FetchAllEventsFromSession(sessionId string, page *es.Page, eventIngestionIndex string) ([]es.EventResponse, *int64, error) + FetchEventsFromSession(sessionId string, page *es.Page, eventIngestionIndex, indexName 
string) ([]es.EventResponse, *int64, error) + FetchZipsFromSession(sessionId string, eventIngestionIndex string, indexName string) ([]string, error) + FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion string) ([]string, error) +} + +type EventsAccessLayerImpl struct { + eventsRepository repository.EventsRepository +} + +func NewEventsAccessLayer(eventsRepository repository.EventsRepository) EventsAccessLayer { + return &EventsAccessLayerImpl{ + eventsRepository: eventsRepository, + } +} + +func (ral *EventsAccessLayerImpl) CreateEventIngester(message string, eventIngestionIndex string) (int, error) { + return ral.eventsRepository.CreateEventIngester(message, eventIngestionIndex) +} + +func (ral *EventsAccessLayerImpl) FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion []string, deviceIds []string, snapshotPerSecond []int64, startTime, endTime int64, page *es.Page, eventIngestionIndexList []string, phoneNumber []string, customerId []string, codePushVersion []string, agentEmailId []string, appOs string) ([]es.EventResponse, error) { + result, err := ral.eventsRepository.FetchEventsWithLabels(labels, appName, screenName, fragmentName, vertical, appVersion, deviceIds, snapshotPerSecond, startTime, endTime, page, eventIngestionIndexList, phoneNumber, customerId, codePushVersion, agentEmailId, appOs) + if err != nil { + return nil, err + } + eventsResponse, _, err := mapper.MapEsResponseToEventResponse(result) + if err != nil { + log.Error("elasticsearch search response mapping failed", zap.Error(err)) + return nil, err + } + return eventsResponse, nil +} + +func (ral *EventsAccessLayerImpl) FetchEventsFromSessionId(sessionId string, page *es.Page, eventIngestionIndex string, withAggregation bool) ([]es.EventResponse, *int64, error) { + result, err := ral.eventsRepository.FetchEventsFromSessionId(sessionId, eventIngestionIndex, withAggregation) + if err != nil { + return nil, nil, err + } + return 
mapper.MapEsResponseToEventResponse(result) +} + +func (ral *EventsAccessLayerImpl) FetchUniqueKeys(key string, eventIngestionIndex string) ([]string, error) { + result, err := ral.eventsRepository.FetchUniqueKeys(key, eventIngestionIndex) + if err != nil { + return nil, err + } + return mapper.MapESResponseToGetUniqueValues(result, key) +} + +func (ral *EventsAccessLayerImpl) FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion string) ([]string, error) { + result, err := ral.eventsRepository.FetchUniqueKeysWithFilters(key, eventIngestionIndex, minAppVersion) + if err != nil { + return nil, err + } + return mapper.MapESResponseToGetUniqueValues(result, key) +} + +func (ral *EventsAccessLayerImpl) FetchAllEventsFromSession(sessionId string, page *es.Page, eventIngestionIndex string) ([]es.EventResponse, *int64, error) { + result, err := ral.eventsRepository.FetchAllEventsFromSession(sessionId, eventIngestionIndex) + if err != nil { + return nil, nil, err + } + return mapper.MapEsResponseToEventResponse(result) +} + +func (ral *EventsAccessLayerImpl) FetchEventsFromSession(sessionId string, page *es.Page, eventIngestionIndex, indexName string) ([]es.EventResponse, *int64, error) { + result, err := ral.eventsRepository.FetchEventsFromSession(sessionId, eventIngestionIndex, indexName) + if err != nil { + return nil, nil, err + } + return mapper.MapEsResponseToEventResponse(result) +} + +func (ral *EventsAccessLayerImpl) FetchZipsFromSession(sessionId string, eventIngestionIndex string, indexName string) ([]string, error) { + result, err := ral.eventsRepository.FetchZipsFromSession(sessionId, eventIngestionIndex, indexName) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToZipResponse(result) +} diff --git a/alfred/repositoryAccessLayer/repositoriesAccessLayer.go b/alfred/repositoryAccessLayer/repositoriesAccessLayer.go new file mode 100644 index 0000000..817b81a --- /dev/null +++ 
b/alfred/repositoryAccessLayer/repositoriesAccessLayer.go @@ -0,0 +1,31 @@ +package repositoryAccessLayer + +import ( + "alfred/repository" +) + +type RepositoryAccessLayer struct { + SessionsAccessLayer SessionsAccessLayer + WebSessionsAccessLayer WebSessionsAccessLayer + EventsAccessLayer EventsAccessLayer + CruiseControlAccessLayer CruiseControlAccessLayer + AppFragmentsAccessLayer AppFragmentsAccessLayer + ErrorEventsAccessLayer ErrorEventsAccessLayer + ShedlockAccessLayer ShedlockAccessLayer + VideoGenerationStatusAccessLayer VideoGenerationStatusAccessLayer + DeviceMetricsAccessLayer DeviceMetricsAccessLayer +} + +func InitRepositoryAccessLayer(repositories *repository.Repository) *RepositoryAccessLayer { + return &RepositoryAccessLayer{ + SessionsAccessLayer: NewSessionsAccessLayer(repositories.SessionsRepository), + WebSessionsAccessLayer: NewWebSessionsAccessLayer(repositories.WebSessionsRepository), + EventsAccessLayer: NewEventsAccessLayer(repositories.EventsRepository), + CruiseControlAccessLayer: NewCruiseControlAccessLayer(repositories.CruiseControlRepository), + AppFragmentsAccessLayer: NewAppFragmentAccessLayer(repositories.AppFragmentRepository), + ErrorEventsAccessLayer: NewErrorEventsAccessLayer(repositories.ErrorEventsRepository), + ShedlockAccessLayer: NewShedlockAccessLayer(repositories.ShedlockRepository), + VideoGenerationStatusAccessLayer: NewVideoGenerationStatusAccessLayer(repositories.VideoGenerationStatusRepository), + DeviceMetricsAccessLayer: NewDeviceMetricskAccessLayer(repositories.DeviceMetricsRepository), + } +} diff --git a/alfred/repositoryAccessLayer/sessionsAccessLayer.go b/alfred/repositoryAccessLayer/sessionsAccessLayer.go new file mode 100644 index 0000000..0041442 --- /dev/null +++ b/alfred/repositoryAccessLayer/sessionsAccessLayer.go @@ -0,0 +1,126 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/repository" + "go.uber.org/zap" +) + 
+type SessionsAccessLayer interface { + UploadSession(alfredSessionRecordingEvent ingester.SessionUploadRequest, sessionUploadIndex string) (int, error) + FetchSessionsWithSessionIds(sessionIds []string, page *es.Page, sessionUploadIndex string) ([]es.SessionResponse, error) + FetchSessionAndSessionDurationWithSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) ([]es.SessionResponse, error) + FetchSessionWithTimeRange(startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndexList []string, sessionUploadIndex, sortBy, appOs string) ([]es.SessionResponse, error) + FetchDeviceAttributesForMetrics(startTimestamp int64, endTimestamp int64, snapshotPerSecond int64, sessionUploadIndex string, page *es.Page) (*es.SessionEsResponseForDeviceAttributes, error) + UpdateSessionErrorEventsWithSessionId(sessionId []string, sessionUploadIndexList []string, hasErrors bool) error + FetchSessionWithSessionDuration(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) ([]es.SessionResponse, error) + FetchSessionWithLabels(appVersion []string, deviceIds []string, startTimestamp, endTimestamp int64, page *es.Page, sessionUploadSearchIndexList []string, appOs string) ([]es.SessionResponse, error) + FetchUniqueKeys(key string, sessionUploadIndex string) ([]string, error) +} + +type SessionsAccessLayerImpl struct { + sessionsRepository repository.SessionsRepository +} + +func NewSessionsAccessLayer(sessionsRepository repository.SessionsRepository) SessionsAccessLayer { + return &SessionsAccessLayerImpl{ + sessionsRepository: sessionsRepository, + } +} + +func (ral *SessionsAccessLayerImpl) UploadSession(alfredSessionRecordingEvent ingester.SessionUploadRequest, sessionUploadIndex string) (int, error) { + return ral.sessionsRepository.UploadSession(alfredSessionRecordingEvent, sessionUploadIndex) +} + +func (ral *SessionsAccessLayerImpl) FetchSessionsWithSessionIds(sessionIds []string, page *es.Page, sessionUploadIndex 
string) ([]es.SessionResponse, error) { + result, err := ral.sessionsRepository.FetchSessionsWithSessionIds(sessionIds, sessionUploadIndex) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToSessionResponse(result) +} + +func (ral *SessionsAccessLayerImpl) FetchSessionAndSessionDurationWithSessionIds(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) ([]es.SessionResponse, error) { + result, err := ral.sessionsRepository.FetchSessionAndSessionDurationWithSessionIds(sessionIds, sessionUploadIndex, page, sortBy) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToSessionWithDurationResponse(result) +} + +func (ral *SessionsAccessLayerImpl) FetchSessionWithTimeRange(startTimestamp, endTimestamp int64, page *es.Page, sessionUploadIndexList []string, sessionUploadIndex, sortBy, appOs string) ([]es.SessionResponse, error) { + result, err := ral.sessionsRepository.FetchSessionWithTimeRange(startTimestamp, endTimestamp, page, sessionUploadIndexList, sortBy, appOs) + if err != nil { + return nil, err + } + sessionResponse, err := mapper.MapEsResponseToSessionResponseForAllSessions(result) + if err != nil { + return nil, err + } + sessions := getUniqueSessions(sessionResponse) + + sessionWithDurationResult, err := ral.sessionsRepository.FetchSessionAndSessionDurationWithSessionIds(sessions, sessionUploadIndex, page, sortBy) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToSessionWithDurationResponse(sessionWithDurationResult) +} + +func (ral *SessionsAccessLayerImpl) FetchDeviceAttributesForMetrics(startTimestamp int64, endTimestamp int64, snapshotPerSecond int64, sessionUploadIndex string, page *es.Page) (*es.SessionEsResponseForDeviceAttributes, error) { + result, err := ral.sessionsRepository.FetchDeviceAttributesForMetrics(startTimestamp, endTimestamp, snapshotPerSecond, sessionUploadIndex, page) + if err != nil { + log.Error("Fetching Device Metrics Failed:", zap.Error(err)) + return 
nil, err + } + return mapper.MapEsApiResponseToSessionDeviceAttributesResponse(result) +} + +func (ral *SessionsAccessLayerImpl) UpdateSessionErrorEventsWithSessionId(sessionId []string, sessionUploadIndexList []string, hasErrors bool) error { + return ral.sessionsRepository.UpdateSessionErrorEventsWithSessionId(sessionId, sessionUploadIndexList, hasErrors) +} +func (ral *SessionsAccessLayerImpl) FetchSessionWithSessionDuration(sessionIds []string, sessionUploadIndex string, page *es.Page, sortBy string) ([]es.SessionResponse, error) { + result, err := ral.sessionsRepository.FetchSessionListFromSessionIds(sessionIds, sessionUploadIndex, page, sortBy) + if err != nil { + return nil, err + } + sessionResponse, err := mapper.MapEsResponseToSessionResponseForAllSessions(result) + if err != nil { + return nil, err + } + sessions := getUniqueSessions(sessionResponse) + sessionWithDurationResult, err := ral.sessionsRepository.FetchSessionAndSessionDurationWithSessionIds(sessions, sessionUploadIndex, page, sortBy) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToSessionWithDurationResponse(sessionWithDurationResult) +} + +func (ral *SessionsAccessLayerImpl) FetchSessionWithLabels(appVersion []string, deviceIds []string, startTimestamp, + endTimestamp int64, page *es.Page, sessionUploadSearchIndexList []string, appOs string) ([]es.SessionResponse, error) { + result, err := ral.sessionsRepository.FetchSessionWithLabels(appVersion, deviceIds, startTimestamp, endTimestamp, + page, sessionUploadSearchIndexList, appOs) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToSessionResponse(result) +} + +func (ral *SessionsAccessLayerImpl) FetchUniqueKeys(key string, sessionUploadIndex string) ([]string, error) { + result, err := ral.sessionsRepository.FetchUniqueKeys(key, sessionUploadIndex) + if err != nil { + return nil, err + } + return mapper.MapESResponseToGetUniqueValues(result, key) +} + +func getUniqueSessions(stringSlice 
[]es.SessionResponse) []string { + var uniqueIds []string + for _, entry := range stringSlice { + sessionId := entry.Source.BaseAttributes.SessionId + uniqueIds = append(uniqueIds, sessionId) + } + return uniqueIds +} diff --git a/alfred/repositoryAccessLayer/shedlockAccessLayer.go b/alfred/repositoryAccessLayer/shedlockAccessLayer.go new file mode 100644 index 0000000..e8a1ae6 --- /dev/null +++ b/alfred/repositoryAccessLayer/shedlockAccessLayer.go @@ -0,0 +1,46 @@ +package repositoryAccessLayer + +import ( + "alfred/mapper" + "alfred/model/common" + "alfred/model/es" + "alfred/repository" +) + +type ShedlockAccessLayer interface { + InsertShedlockForCronWithOptimisticControl(shedLock *common.ShedLock, primaryTerm *int, seqNo *int) (int, error) + InsertShedlockForCron(shedLock *common.ShedLock) error + GetShedLockStatus(cronName string) (*es.ShedlockResponse, int, error) +} + +type ShedLockAccessLayerImpl struct { + shedlockRepository repository.ShedlockRepository +} + +func NewShedlockAccessLayer(shedlockRepository repository.ShedlockRepository) ShedlockAccessLayer { + return &ShedLockAccessLayerImpl{ + shedlockRepository: shedlockRepository, + } +} + +func (sl *ShedLockAccessLayerImpl) InsertShedlockForCron(shedLock *common.ShedLock) error { + + err := sl.shedlockRepository.InsertShedlockForCron(shedLock) + return err +} + +func (sl *ShedLockAccessLayerImpl) InsertShedlockForCronWithOptimisticControl(shedLock *common.ShedLock, primaryTerm *int, seqNo *int) (int, error) { + status, err := sl.shedlockRepository.InsertShedlockForCronWithOptimisticControl(shedLock, primaryTerm, seqNo) + return status, err +} + +func (sl *ShedLockAccessLayerImpl) GetShedLockStatus(cronName string) (*es.ShedlockResponse, int, error) { + + result, err := sl.shedlockRepository.GetShedLockStatus(cronName) + if err != nil { + return nil, 0, err + } + mappedResult := mapper.MapEsapiResponseToShedlockResponse(result) + + return mappedResult, result.StatusCode, nil +} diff --git 
a/alfred/repositoryAccessLayer/videoGenerationStatusAccessLayer.go b/alfred/repositoryAccessLayer/videoGenerationStatusAccessLayer.go new file mode 100644 index 0000000..e661525 --- /dev/null +++ b/alfred/repositoryAccessLayer/videoGenerationStatusAccessLayer.go @@ -0,0 +1,45 @@ +package repositoryAccessLayer + +import ( + "alfred/model/core" + "alfred/repository" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +type VideoGenerationStatusAccessLayer interface { + CreateVideoGenerationStatus(videoGenerationStatus core.VideoFragmentStatusModel, message string, videoGenerationStatusIndex string) error + FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex string) (*esapi.Response, error) + UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, eventId, status, processedZipName string, currentFragment int64) (int, error) + UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status string, videoGeneratedTillNow int) (int, error) + UpdateVideoDimensions(sessionId, videoGenerationStatusIndex string, width, height int) error +} + +type VideoGenerationStatusAccessLayerImpl struct { + videoGenerationStatusRepository repository.VideoGenerationStatusRepository +} + +func NewVideoGenerationStatusAccessLayer(videoGenerationStatusRepository repository.VideoGenerationStatusRepository) VideoGenerationStatusAccessLayer { + return &VideoGenerationStatusAccessLayerImpl{ + videoGenerationStatusRepository: videoGenerationStatusRepository, + } +} + +func (r *VideoGenerationStatusAccessLayerImpl) CreateVideoGenerationStatus(fragmentData core.VideoFragmentStatusModel, message string, videoGenerationStatusIndex string) error { + return r.videoGenerationStatusRepository.CreateVideoGenerationStatus(fragmentData, message, videoGenerationStatusIndex) +} + +func (r *VideoGenerationStatusAccessLayerImpl) FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex string) (*esapi.Response, error) { + return 
r.videoGenerationStatusRepository.FetchVideoGenerationStatus(sessionId, videoGenerationStatusIndex) +} + +func (r *VideoGenerationStatusAccessLayerImpl) UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, eventId, status, processedZipName string, currentFragment int64) (int, error) { + return r.videoGenerationStatusRepository.UpdateFragmentVideoGenerationStatus(sessionId, videoGenerationStatusIndex, eventId, status, processedZipName, currentFragment) +} + +func (r *VideoGenerationStatusAccessLayerImpl) UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status string, videoGeneratedTillNow int) (int, error) { + return r.videoGenerationStatusRepository.UpdateVideoGenerationStatus(sessionId, videoGenerationStatusIndex, status, videoGeneratedTillNow) +} + +func (r *VideoGenerationStatusAccessLayerImpl) UpdateVideoDimensions(sessionId, videoGenerationStatusIndex string, width, height int) error { + return r.videoGenerationStatusRepository.UpdateVideoDimensions(sessionId, videoGenerationStatusIndex, width, height) +} diff --git a/alfred/repositoryAccessLayer/webSessionsAccessLayer.go b/alfred/repositoryAccessLayer/webSessionsAccessLayer.go new file mode 100644 index 0000000..fc67a25 --- /dev/null +++ b/alfred/repositoryAccessLayer/webSessionsAccessLayer.go @@ -0,0 +1,78 @@ +package repositoryAccessLayer + +import ( + "alfred/api/request" + "alfred/mapper" + "alfred/model/es" + "alfred/model/ingester" + "alfred/pkg/log" + "alfred/repository" + "go.uber.org/zap" +) + +type WebSessionsAccessLayer interface { + UploadWebSession(sessionUploadRequest ingester.WebSessionUploadRequest, webSessionUploadIndex string) error + FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex string) ([]es.WebSessionResponse, error) + FetchAllWebSession(filters request.WebSessionFilters, page *es.Page, webSessionUploadIndexList []string) ([]es.WebSessionResponse, error) + FetchAllWebSessionWithDuration(filters request.WebSessionFilters, page 
*es.Page, webSessionUploadIndex string, webSessionUploadIndexList []string) ([]es.WebSessionResponse, error) +} + +type WebSessionsAccessLayerImpl struct { + webSessionsRepository repository.WebSessionsRepository +} + +func NewWebSessionsAccessLayer(webSessionsAccessLayer repository.WebSessionsRepository) WebSessionsAccessLayer { + return &WebSessionsAccessLayerImpl{ + webSessionsRepository: webSessionsAccessLayer, + } +} + +func (ral *WebSessionsAccessLayerImpl) UploadWebSession(sessionUploadRequest ingester.WebSessionUploadRequest, webSessionUploadIndex string) error { + return ral.webSessionsRepository.UploadWebSession(sessionUploadRequest, webSessionUploadIndex) +} + +func (ral *WebSessionsAccessLayerImpl) FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex string) ([]es.WebSessionResponse, error) { + result, err := ral.webSessionsRepository.FetchWebSessionsWithSessionId(sessionId, webSessionUploadIndex) + if err != nil { + return nil, err + } + return mapper.MapEsResponseToWebSessionResponse(result, &es.Page{}) +} + +func (ral *WebSessionsAccessLayerImpl) FetchAllWebSession(filters request.WebSessionFilters, page *es.Page, webSessionUploadIndexList []string) ([]es.WebSessionResponse, error) { + result, err := ral.webSessionsRepository.FetchAllWebSession(filters, page, webSessionUploadIndexList) + if err != nil { + log.Error("error while fetching all web session in FetchAllWebSession", zap.Error(err)) + return nil, err + } + return mapper.MapEsResponseToWebSessionResponse(result, page) +} + +func (ral *WebSessionsAccessLayerImpl) FetchAllWebSessionWithDuration(filters request.WebSessionFilters, page *es.Page, webSessionUploadIndex string, webSessionUploadIndexList []string) ([]es.WebSessionResponse, error) { + result, err := ral.webSessionsRepository.FetchAllWebSession(filters, page, webSessionUploadIndexList) + if err != nil { + log.Error("error while fetching all web session in FetchAllWebSessionWithDuration", zap.Error(err)) + return nil, err + } 
+ webSessionResponse, err := mapper.MapEsResponseToWebSessionResponse(result, page) + if err != nil { + log.Error("error while mapping es response to web session response in FetchAllWebSessionWithDuration", zap.Error(err)) + return nil, err + } + uniqueWebSessions := getUniqueWebSessions(webSessionResponse) + webSessionWithDurationResult, err := ral.webSessionsRepository.FetchWebSessionsWithDurationResponse(uniqueWebSessions, webSessionUploadIndex, filters, page) + if err != nil { + log.Error("error while fetching web session with duration in FetchAllWebSessionWithDuration", zap.Error(err)) + return nil, err + } + return mapper.MapEsResponseToWebSessionResponseWithDuration(webSessionWithDurationResult, page) +} + +func getUniqueWebSessions(stringSlice []es.WebSessionResponse) []string { + var uniqueIds []string + for _, entry := range stringSlice { + sessionId := entry.Source.WebBaseAttributes.SessionId + uniqueIds = append(uniqueIds, sessionId) + } + return uniqueIds +} diff --git a/alfred/scheduler/errorEventsHandler.go b/alfred/scheduler/errorEventsHandler.go new file mode 100644 index 0000000..0f37ed9 --- /dev/null +++ b/alfred/scheduler/errorEventsHandler.go @@ -0,0 +1,97 @@ +package scheduler + +import ( + "alfred/cmd/core/app/helper" + "alfred/config" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "errors" + "go.uber.org/zap" + "math" + "sync" + "time" +) + +func ErrorEventsUpdateHandler(clientName string, repositoryAccessLayer repositoryAccessLayer.RepositoryAccessLayer) error { + errorEventsUploadIndex := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUploadIndexClientMap[clientName] + sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[clientName] + errorEventsLastCronTimestampIndex := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsLastCronTimestampIndex + errorEventsUpdateCronDelayTimeInMinutes := 
config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUpdateCronDelayTimeInMinutes + sessionErrorEventsFilter := config.GetCoreConfig().SessionErrorEventsFilter + endTime := utils.GetCurrentTimeInMillis() - ((errorEventsUpdateCronDelayTimeInMinutes * time.Minute).Milliseconds()) + var result interface{} + var err error + retryFuncWithResponseAndError := func() (interface{}, error) { + result, err = repositoryAccessLayer.ErrorEventsAccessLayer.GetErrorEventsLastCronTimestamp(clientName, errorEventsLastCronTimestampIndex) + return result, err + } + result, err = utils.RetryFunctionWithResponseAndError(retryFuncWithResponseAndError, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateMaxRetry, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + if err != nil { + log.Error("Error while fetching last cron timestamp for error events "+clientName, zap.Error(err)) + return err + } + startTime := result.(int64) + errorEventsUploadIndexList := helper.CreateSearchIndex(errorEventsUploadIndex, startTime, endTime) + retryFuncWithResponseAndError = func() (interface{}, error) { + result, err = repositoryAccessLayer.ErrorEventsAccessLayer.FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList, startTime, endTime, sessionErrorEventsFilter) + return result, err + } + result, err = utils.RetryFunctionWithResponseAndError(retryFuncWithResponseAndError, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateMaxRetry, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + if err != nil || result == nil { + log.Error("Error while fetching error events session id with time range "+clientName, zap.Error(err)) + return err + } + invalidSessionIdsList := result.([]string) + + batchSize := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUpdateBatchSize + var waitGroupForSessionUpdate sync.WaitGroup + var hasErrorMutex sync.Mutex + hasErrorInUpdate := false + 
maxConcurrency := config.GetCoreConfig().MaxUpdateSessionErrorEventsGofuncConcurrency + semaphore := make(chan struct{}, maxConcurrency) + + for i := 0; i < len(invalidSessionIdsList); i += batchSize { + end := int(math.Min(float64(i+batchSize), float64(len(invalidSessionIdsList)))) + invalidSessionIds := invalidSessionIdsList[i:end] + // Acquire a slot in the semaphore (blocks if the limit is reached) + semaphore <- struct{}{} + waitGroupForSessionUpdate.Add(1) + go func(invalidSessionIds []string) { + defer func() { + // Release the slot in the semaphore when the goroutine exits + <-semaphore + waitGroupForSessionUpdate.Done() + }() + retryFuncWithError := func() error { + err = repositoryAccessLayer.SessionsAccessLayer.UpdateSessionErrorEventsWithSessionId(invalidSessionIds, []string{sessionUploadIndex + "*"}, true) + return err + } + err = utils.RetryFunctionWithError(retryFuncWithError, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateMaxRetry, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + if err != nil { + hasErrorMutex.Lock() + hasErrorInUpdate = true + log.Error("Error while updating invalid sessions with session id "+clientName, zap.Error(err)) + hasErrorMutex.Unlock() + return + } + }(invalidSessionIds) + } + waitGroupForSessionUpdate.Wait() + close(semaphore) + + if hasErrorInUpdate { + return errors.New("Error while updating invalid sessions with session id " + clientName) + } + + retryFuncWithError := func() error { + err = repositoryAccessLayer.ErrorEventsAccessLayer.UpdateErrorEventsLastCronTimestamp(endTime, clientName, errorEventsLastCronTimestampIndex) + return err + } + err = utils.RetryFunctionWithError(retryFuncWithError, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateMaxRetry, config.GetCoreConfig().ElasticSearchConfig.ElasticSearchUpdateRetryBackOffInSeconds*time.Second) + if err != nil { + log.Error("Error while updating error events last cron timestamp 
"+clientName, zap.Error(err)) + return err + } + return nil +} diff --git a/alfred/scheduler/scheduleCronForErrorEventsUpdate.go b/alfred/scheduler/scheduleCronForErrorEventsUpdate.go new file mode 100644 index 0000000..2cf02e5 --- /dev/null +++ b/alfred/scheduler/scheduleCronForErrorEventsUpdate.go @@ -0,0 +1,74 @@ +package scheduler + +import ( + "alfred/config" + "alfred/internal/infra" + "alfred/internal/metrics" + "alfred/internal/shedlock" + "alfred/pkg/log" + "alfred/repositoryAccessLayer" + "alfred/utils" + "go.uber.org/zap" +) + +func ScheduleCronForErrorEventsUpdate(repositoryAccessLayer *repositoryAccessLayer.RepositoryAccessLayer) { + schedule := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUpdateCronSchedule + go func() { + _, err := infra.ScheduleJob(schedule, func() { + log.Info("Cron Initiated for error events update") + err := shedlock.Lock(utils.ERROR_EVENT_UPDATE_CRON, config.GetCoreConfig().ShedlockConfig.ErrorEventUpdateCronLockedUntil, repositoryAccessLayer) + if err != nil { + log.Error("Cron Already locked for error event update", zap.Error(err)) + return + } + err = ErrorEventsUpdateHandler(utils.NAVI_USER_APP, *repositoryAccessLayer) + if err != nil { + metrics.ErrorEventsUpdateCronFailureCounter.WithLabelValues(utils.NAVI_USER_APP, err.Error()).Inc() + log.Error("Error while updating error events for "+utils.NAVI_USER_APP, zap.Error(err)) + return + } + }) + if err != nil { + log.Error("Error while scheduling cron for error events update "+utils.NAVI_USER_APP, zap.Error(err)) + return + } + + }() + + //uncomment when cosmos onboard + // + //go func() { + // _, err := infra.ScheduleJob(schedule, func() { + // errorEventsUploadIndex := config.GetCoreConfig().ElasticSearchConfig.ErrorEventsUploadIndexClientMap[utils.COSMOS] + // sessionUploadIndex := config.GetCoreConfig().ElasticSearchConfig.SessionUploadIndexClientMap[utils.COSMOS] + // errorEventsLastCronTimestampIndex := 
config.GetCoreConfig().ElasticSearchConfig.ErrorEventsLastCronTimestampIndex + // endTime := utils.GetCurrentTimeInMillis() + // startTime, err := repositoryAccessLayer.ErrorEventsAccessLayer.GetErrorEventsLastCronTimestamp(utils.COSMOS, errorEventsLastCronTimestampIndex) + // if err != nil { + // log.Error("Error while fetching last cron timestamp for error events "+utils.COSMOS, zap.Error(err)) + // return + // } + // errorEventsUploadIndexList := helper.CreateSearchIndex(errorEventsUploadIndex, startTime, endTime) + // invalidSessionIds, err := repositoryAccessLayer.ErrorEventsAccessLayer.FetchSessionIdFromSessionErrorEventsWithTimeRange(errorEventsUploadIndexList, startTime, endTime) + // if err != nil { + // log.Error("Error while fetching error events session id with time range "+utils.COSMOS, zap.Error(err)) + // return + // } + // sessionUploadIndexList := helper.CreateSearchIndex(sessionUploadIndex, startTime, endTime) + // err = repositoryAccessLayer.SessionsAccessLayer.UpdateSessionErrorEventsWithSessionId(invalidSessionIds, sessionUploadIndexList) + // if err != nil { + // log.Error("Error while updating invalid sessions with session id "+utils.COSMOS, zap.Error(err)) + // return + // } + // err = repositoryAccessLayer.ErrorEventsAccessLayer.UpdateErrorEventsLastCronTimestamp(endTime, utils.COSMOS, errorEventsLastCronTimestampIndex) + // if err != nil { + // log.Error("Error while updating error events last cron timestamp "+utils.COSMOS, zap.Error(err)) + // return + // } + // }) + // if err != nil { + // log.Error("Error while scheduling cron for error events update "+utils.COSMOS, zap.Error(err)) + // return + // } + //}() +} diff --git a/alfred/scheduler/schedulerCronDeviceMetrics.go b/alfred/scheduler/schedulerCronDeviceMetrics.go new file mode 100644 index 0000000..f946cd9 --- /dev/null +++ b/alfred/scheduler/schedulerCronDeviceMetrics.go @@ -0,0 +1,107 @@ +package scheduler + +// +//import ( +// "alfred/cmd/core/app/service" +// "alfred/config" 
+// "alfred/internal/clients" +// "alfred/internal/infra" +// "alfred/internal/shedlock" +// "alfred/pkg/log" +// "alfred/repositoryAccessLayer" +// "alfred/utils" +// "go.uber.org/zap" +//) +// +//func ScheduleCronForDeviceMetricsToSlack(repositories *repositoryAccessLayer.RepositoryAccessLayer, httpClient *clients.HttpClient) { +// DeviceMetricsService := service.NewDeviceMetrics(repositories, httpClient) +// +// go func() { +// schedule := config.GetCoreConfig().DeviceMonitoringConfig.DeviceMetricsUpdateCronScheduleMap[utils.NAVI_USER_APP] +// cronTime := utils.GetCronTime(schedule) +// _, err := infra.ScheduleJob(schedule, func() { +// +// err := shedlock.Lock(utils.UPDATE_DEVICE_METRICS_CRON, config.GetCoreConfig().ShedlockConfig.DeviceMetricsCronLockedUntil, repositories) +// if err != nil { +// log.Error("Cron Already Locked for Updating Device Metrics", zap.Error(err)) +// return +// } +// +// log.Info("Cron Initiated for Updating Device Metrics For Navi App") +// +// DeviceMetricsService.UpdateDeviceMetrics(utils.NAVI_USER_APP, cronTime) +// +// }) +// if err != nil { +// log.Error("Cron Failed For Device Metrics For Navi App:", zap.Error(err)) +// return +// } +// log.Info("Cron Success for Device Metrics For Navi App") +// }() +// +// go func() { +// schedule := config.GetCoreConfig().DeviceMonitoringConfig.DeviceMetricsUpdateCronScheduleMap[utils.COSMOS] +// cronTime := utils.GetCronTime(schedule) +// _, err := infra.ScheduleJob(schedule, func() { +// +// err := shedlock.Lock(utils.UPDATE_DEVICE_METRICS_CRON_COSMOS, config.GetCoreConfig().ShedlockConfig.DeviceMetricsCronLockedUntil, repositories) +// if err != nil { +// log.Error("Cron Already Locked for Updating Device Metrics Cosmos", zap.Error(err)) +// return +// } +// +// log.Info("Cron Initiated for Updating Device Metrics For Cosmos") +// +// DeviceMetricsService.UpdateDeviceMetrics(utils.COSMOS, cronTime) +// }) +// if err != nil { +// log.Error("Cron Failed For Updating Device Metrics For 
Cosmos:", zap.Error(err)) +// return +// } +// log.Info("Cron Success for Updating Device Metrics For Cosmos") +// }() +// +// go func() { +// schedule := config.GetCoreConfig().DeviceMonitoringConfig.DeviceMonitoringCronScheduleMap[utils.NAVI_USER_APP] +// cronTime := utils.GetCronTime(schedule) +// _, err := infra.ScheduleJob(schedule, func() { +// +// err := shedlock.Lock(utils.DEVICE_METRICS_ALERT_CRON, config.GetCoreConfig().ShedlockConfig.DeviceMetricsCronLockedUntil, repositories) +// if err != nil { +// log.Error("Cron Already Locked for Device Metrics", zap.Error(err)) +// return +// } +// +// log.Info("Cron Initiated for Device Metrics For Navi App") +// +// DeviceMetricsService.PublishDeviceMetrics(utils.NAVI_USER_APP, cronTime) +// }) +// if err != nil { +// log.Error("Cron Failed For Device Metrics For Navi App:", zap.Error(err)) +// return +// } +// log.Info("Cron Success for Device Metrics For Navi App") +// }() +// +// go func() { +// schedule := config.GetCoreConfig().DeviceMonitoringConfig.DeviceMonitoringCronScheduleMap[utils.COSMOS] +// cronTime := utils.GetCronTime(schedule) +// _, err := infra.ScheduleJob(schedule, func() { +// +// err := shedlock.Lock(utils.DEVICE_METRICS_ALERT_CRON_COSMOS, config.GetCoreConfig().ShedlockConfig.DeviceMetricsCronLockedUntil, repositories) +// if err != nil { +// log.Error("Cron Already Locked for Device Metrics", zap.Error(err)) +// return +// } +// +// log.Info("Cron Initiated for Device Metrics For Cosmos") +// +// DeviceMetricsService.PublishDeviceMetrics(utils.COSMOS, cronTime) +// }) +// if err != nil { +// log.Error("Cron Failed For Device Metrics For Cosmos:", zap.Error(err)) +// return +// } +// log.Info("Cron Success for Device Metrics For Cosmos") +// }() +//} diff --git a/alfred/utils/common_utils.go b/alfred/utils/common_utils.go new file mode 100644 index 0000000..83eda05 --- /dev/null +++ b/alfred/utils/common_utils.go @@ -0,0 +1,38 @@ +package utils + +import ( + "alfred/model/common" +) + +func 
ErrorResponse(err error, code int, metadata interface{}) common.Response { + return common.Response{ + Error: &common.Error{ + Message: err.Error(), + Metadata: metadata, + }, + Status: code, + } +} + +func SuccessResponse(data interface{}, code int) common.Response { + return common.Response{ + Data: data, + Status: code, + } +} + +func AddErrorToResponse(err error, code int, metadata interface{}, response []common.Response) []common.Response { + return append(response, ErrorResponse(err, code, metadata)) +} + +func AddDataToResponse(data interface{}, code int, response []common.Response) []common.Response { + return append(response, SuccessResponse(data, code)) +} + +func SuccessPaginatedResponse(data interface{}, page common.Page, code int) common.PaginatedResponse { + return common.PaginatedResponse{ + Data: data, + Page: page, + Status: code, + } +} diff --git a/alfred/utils/constants.go b/alfred/utils/constants.go new file mode 100644 index 0000000..79d19da --- /dev/null +++ b/alfred/utils/constants.go @@ -0,0 +1,238 @@ +package utils + +import "time" + +type Extensions string + +const ( + TempDestinationFolder = "alfredTmp" + ZipContentType = "application/zip" + XzContentType = "application/x-xz" + ZipExtension Extensions = ".zip" + ZipXzExtension Extensions = ".zip.xz" + JsonExtension Extensions = ".json" + VideoExtension Extensions = ".mp4" + ImageExtensionJpeg Extensions = ".jpeg" + ImageExtensionWebp Extensions = ".webp" + GZExtension Extensions = ".gz" + TempDirectory = "tempDir" + ThirdPartyDirectory = "thirdParty" + ImageTypeJpeg = "jpeg" + ImageTypeWebp = "webp" +) + +// customer federation constants +const ( + CustomerFederationUrlByReferenceId = "%s/customer-federation-layer" + Query = "{\"query\": \"{getCustomer(customerReferenceId: \\\"%s\\\") {getDevice{deviceId,customerReferenceId}}}\"}" +) + +// customer service constants +const ( + CustomerProfileServiceUrlByPhoneNumber = 
"%s/customer-profile/internal/v2/customer/search?type=PHONE_NUMBER&value=%s" + CustomerProfileServiceUrlByExternalId = "%s/customer-profile/internal/v2/customer/search?type=EXTERNAL_ID&value=%s" + TenantIdHeadersKey = "X-TENANT-ID" + CoorelationId = "X-Correlation-Id" + TeamName = "X-Team-Name" + CRMTENANT = "crm" +) + +// NAVI filter names +const ( + EVENTS_SCREEN_NAME = "events.screen_name" + FRAGMENT_NAME = "fragment_name" + SCREEN_NAME = "screen_name" + VERTICAL = "vertical" +) + +// litmus proxy service constants +const ( + LitmusProxyUrl = "%s/litmus-proxy/v1/proxy/experiment?name=%s" + DeviceIdHeadersKey = "deviceId" +) + +// es query upper limits +const ( + SessionUpperLimit = 10000 + EventsUpperLimit = 10000 + EsUpperLimit = 10000 +) + +// DefaultCacheTtl cache constants +const ( + DefaultCacheTtl = 20 * time.Minute +) + +// UUID SUFFIX +const ( + SESSION_SUFFIX = "ALFRED_SESSION_ID" + EVENT_SUFFIX = "ALFRED_EVENT_ID" + WEB_SESSION_SUFFIX = "" +) + +// Headers keys +const ( + X_SESSION_TOKEN = "X-Session-Token" + X_EMAIL_ID = "X-Email-Id" + X_API_KEY = "X-Api-Key" + ApplicationJsonContentType = "application/json" + X_PLATFORM = "X-Platform" + X_AUTH_KEY = "X-Auth-Key" +) + +// Query Parameters +const ( + WEB_PROJECT_NAME = "project_name" + ANDROID_OS = "android" + IOS_OS = "ios" +) + +// ClientName +const ( + CLIENT_NAME = "CLIENT_NAME" + NAVI_USER_APP = "NaviUserApp" + NAVI_USER_APP_IOS = "NaviUserAppIos" + COSMOS = "CosmosApp" + LONGHORN = "longhorn" + TRIBUTE_WEB = "tribute-web" +) + +// Errors +const ( + BAD_REQUEST = "Bad Request" + INVALID_CLIENT = "client is not whitelisted" + INVALID_WEB_CLIENT = "web client is not whitelisted" + NO_SESSION_FOUND = "no session found" +) + +// Joiners +const ( + COLON = ":" + HYPHEN = "-" + EMPTY = "" + UNDERSCORE = "_" + NEWLINE = "\n" + FORWARD_SLASH = "/" + ASTERISK = "*" + DOT = "." 
+ COMMA = "," + SEMICOLON = ";" + AMPERSAND = "&" +) + +// Ingester Endpoints +const ( + WEB_SESSIONS_V2 = "/v2/ingest/web/sessions" + CRUISE_CONTROL = "/cruise" + CRUISE_CONTROL_V2 = "/v2/cruise" + ERROR_EVENTS_API = "/error-events" + PING = "/ping" + PONG = "pong" + PPROF = "/pprof" + ADMIN = "/admin" +) + +const ( + WEB_SESSION_VALUE = "webSession" + WEB_SESSIONS_V2_VALUE = "webSessionV2" + CRUISE_CONTROL_VALUE = "cruise" + CRUISE_CONTROL_V2_VALUE = "cruiseV2" +) + +// Query Params +const ( + PROJECT_NAME = "project_name" + SESSION_ID = "session_id" + FRAGMENTS_TILL_NOW = "fragments_till_now" +) + +// Date Time constants +const ( + IST_TIME_ZONE = "Asia/Kolkata" + DateFormat = "2006-01-02" + IndexDateRegex = `(\d{4}-\d{1,2}-\d{1,2})` +) + +// FPS related +const ( + DEFAULT_RECORDING_FPS = 1 + RECORDING_2_FPS = 2 +) + +// Device Attributes +const ( + BATTERY_HIGH = "BatteryHigh" + BATTERY_MEDIUM = "BatteryMedium" + BATTERY_LOW = "BatteryLow" + NETWORK_STRENGTH = "NetworkStrength" + MEMORY_HIGH = "MemoryHigh" + MEMORY_LOW = "MemoryLow" + DEVICE_ID = "deviceId" + HIGH = "High" + MEDIUM = "Medium" + FIVE_G = "5g" + FOUR_G = "4g" + WIFI = "wifi" + CUSTOMER_ID = "customer_id" +) + +// Video Generation Status +const ( + COMPLETED = "COMPLETED" + PENDING = "PENDING" + MASKING_DONE = "MASKED" + TOUCH_POINTS_ADDED = "TOUCH_POINTS_ADDED" +) + +const ( + P90 = 0.9 + P95 = 0.95 + P50 = 0.5 +) + +const ( + UPDATE_DEVICE_METRICS_CRON = "Update-Device-Metrics-Cron" + DEVICE_METRICS_ALERT_CRON = "Device-Metrics-Cron" + UPDATE_DEVICE_METRICS_CRON_COSMOS = "Update-Device-Metrics-Cron-Cosmos" + DEVICE_METRICS_ALERT_CRON_COSMOS = "Device-Metrics-Cron-Cosmos" + ERROR_EVENT_UPDATE_CRON = "Error-Event-Update-Cron" +) + +const ( + PROCESS_FILE_NAME_SUFFIX = "PROCESSED" + TO_BE_MASKED_FILE_SUFFIX = "TO_BE_MASKED" + MASKED_FILE_SUFFIX = "MASKED" +) + +const ( + USER_EMAIL_HEADER = "X-User-Email" + IMAGE_TYPE = "image_type" + FILE_TYPE_EXTENSION = "file_type_extension" +) + +// Event 
// Set is a string set backed by a map. The zero value (a nil map) is not
// usable for Add — construct with Set{} or make(Set).
type Set map[string]bool

// Add inserts element into the set; adding an existing element is a no-op.
func (s Set) Add(element string) {
	s[element] = true
}

// Remove deletes element from the set; removing an absent element is a no-op.
func (s Set) Remove(element string) {
	delete(s, element)
}

// Contains reports whether element is a member of the set.
func (s Set) Contains(element string) bool {
	return s[element]
}

// Size returns the number of elements in the set.
func (s Set) Size() int {
	return len(s)
}

// Elements returns the members in ascending lexicographic order.
func (s Set) Elements() []string {
	elements := make([]string, 0, len(s))
	for element := range s {
		elements = append(elements, element)
	}
	// sort.Strings replaces the previous sort.Slice with a hand-written
	// comparator — identical order, simpler and slightly faster.
	sort.Strings(elements)
	return elements
}
time.Now().In(timeInIst).Add(offset).UnixMilli() +} + +func GetCurrentDate() (int, time.Month, int) { + timeInIst, err := time.LoadLocation("Asia/Kolkata") + if err != nil { + return 0, 0, 0 + } + return time.Now().In(timeInIst).Date() +} + +func GetDateWithOffset(offset time.Duration) (int, time.Month, int) { + timeInIst, err := time.LoadLocation("Asia/Kolkata") + if err != nil { + return 0, 0, 0 + } + return time.Now().In(timeInIst).Add(offset).Date() +} + +func NormalizeDate(date string) string { + dateSplits := strings.Split(date, "-") + if len(dateSplits) != 3 { + log.Error("invalid date format", zap.String("date", date)) + return EMPTY + } + dateSplits[1] = fmt.Sprintf("%02s", dateSplits[1]) + dateSplits[2] = fmt.Sprintf("%02s", dateSplits[2]) + return strings.Join(dateSplits, "-") +} + +func GetCronTime(cronExpression string) int64 { + // Parse the cron expression + expr, _ := cronexpr.Parse(cronExpression) + + diffTime := expr.Next(time.Now()).UnixMilli() - time.Now().UnixMilli() + + return diffTime +} diff --git a/alfred/utils/index_utils.go b/alfred/utils/index_utils.go new file mode 100644 index 0000000..d9ae0a2 --- /dev/null +++ b/alfred/utils/index_utils.go @@ -0,0 +1,28 @@ +package utils + +import ( + "alfred/pkg/log" + "go.uber.org/zap" + "regexp" + "time" +) + +func CompareDateAndFindKey(indexName string, case1, case2, date string) string { + indexDateRegex := regexp.MustCompile(IndexDateRegex) + indexDate := NormalizeDate(indexDateRegex.FindStringSubmatch(indexName)[1]) + compareDate := NormalizeDate(date) + parsedIndexdate, err := time.Parse(DateFormat, indexDate) + if err != nil { + log.Error("Error while parsing date", zap.Error(err)) + return EMPTY + } + parsedCompareDate, err := time.Parse(DateFormat, compareDate) + if err != nil { + log.Error("Error while parsing date", zap.Error(err)) + return EMPTY + } + if parsedIndexdate.Before(parsedCompareDate) { + return case1 + } + return case2 +} diff --git a/alfred/utils/json_util.go 
// Contains reports whether targetElement occurs in elements.
func Contains(elements []string, targetElement string) bool {
	for i := range elements {
		if elements[i] == targetElement {
			return true
		}
	}
	return false
}
// invalidFilenameChars matches runs of characters outside [0-9A-Za-z_].
// Compiled once at package scope; the original recompiled it on every call.
var invalidFilenameChars = regexp.MustCompile(`[^\w]+`)

// ConvertToValidFilename collapses every run of non-word characters in input
// to a single underscore and strips leading/trailing underscores, yielding a
// filesystem-safe name.
func ConvertToValidFilename(input string) string {
	validFilename := invalidFilenameChars.ReplaceAllString(input, "_")
	return strings.Trim(validFilename, "_")
}
+ } + log.Error("Error while retrying", zap.Error(err)) + // Calculate the delay using exponential backoff. + delay := initialDelay * (1 << uint(retry)) + time.Sleep(delay) + } + return errors.New("retry with error failed") +} diff --git a/alfred/utils/sort_utils.go b/alfred/utils/sort_utils.go new file mode 100644 index 0000000..2629692 --- /dev/null +++ b/alfred/utils/sort_utils.go @@ -0,0 +1,99 @@ +package utils + +import ( + "alfred/api/response" + "alfred/model/common" + "sort" +) + +type responseDescSorter []common.Response + +func (a responseDescSorter) Len() int { + return len(a) +} + +func (a responseDescSorter) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a responseDescSorter) Less(i, j int) bool { + if a[i].Data == nil { + return false + } + first := a[i].Data.(response.SessionResponseData).RecordStartingTime + if a[j].Data == nil { + return true + } + second := a[j].Data.(response.SessionResponseData).RecordStartingTime + return first > second +} + +type responseAscSorter []common.Response + +func (a responseAscSorter) Len() int { + return len(a) +} + +func (a responseAscSorter) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a responseAscSorter) Less(i, j int) bool { + first := a[i].Data.(response.SessionResponseData).RecordStartingTime + second := a[j].Data.(response.SessionResponseData).RecordStartingTime + return first > second +} + +func Sort(unsortedSlice *[]common.Response) { + sort.Sort(responseDescSorter(*unsortedSlice)) +} + +func SortAsc(unsortedSlice *[]common.Response) { + sort.Sort(responseAscSorter(*unsortedSlice)) +} + +type searchSessionResponseDescSorter []common.Response + +func (a searchSessionResponseDescSorter) Len() int { + return len(a) +} + +func (a searchSessionResponseDescSorter) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a searchSessionResponseDescSorter) Less(i, j int) bool { + if a[i].Data == nil { + return false + } + first := a[i].Data.(response.SearchSessionResponseData).CreatedAt + if 
// ConvertStringListToInt64List parses each decimal string in stringList into
// an int64, returning an error for the first value that fails to parse.
// The original ignored ParseInt errors (silently appending 0) while always
// returning a nil error, which made the error return value meaningless.
func ConvertStringListToInt64List(stringList []string) ([]int64, error) {
	int64List := make([]int64, 0, len(stringList))
	for _, str := range stringList {
		i, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return nil, errors.New("parsing \"" + str + "\" as int64: " + err.Error())
		}
		int64List = append(int64List, i)
	}
	return int64List, nil
}
// CreateSlicesFromStringList partitions data into consecutive batches of at
// most size elements each; the final batch may be shorter. The returned
// sub-slices share data's backing array.
func CreateSlicesFromStringList(data []string, size int) [][]string {
	var batches [][]string
	for start := 0; start < len(data); start += size {
		stop := start + size
		if stop > len(data) {
			stop = len(data)
		}
		batches = append(batches, data[start:stop])
	}
	return batches
}
// ValidateFileExtension checks that filename ends with fileExtension.
// It returns (true, nil) on a match, otherwise (false, err) with an error
// naming the expected extension.
func ValidateFileExtension(filename, fileExtension string) (bool, error) {
	if !strings.HasSuffix(filename, fileExtension) {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)), which go vet flags
		// (S1028); the message text is unchanged.
		return false, fmt.Errorf("filename %s should be of extension: %s", filename, fileExtension)
	}
	return true, nil
}
// phoneNumberRegex matches an optional "+", an optional 1-2 digit country
// code, then exactly 10 digits. Compiled once at package scope; the original
// called regexp.MatchString per invocation, recompiling the pattern each time
// and handling a compile error that can never occur for a fixed pattern.
var phoneNumberRegex = regexp.MustCompile(`^\+?(\d{1,2})?\d{10}$`)

// ValidatePhoneNumber reports whether phoneNumber is empty (treated as valid,
// i.e. the field is optional) or a well-formed phone number per
// phoneNumberRegex.
func ValidatePhoneNumber(phoneNumber string) bool {
	if phoneNumber == "" {
		return true
	}
	return phoneNumberRegex.MatchString(phoneNumber)
}