Integrate new cross-platform symbolicator #1800

Draft · wants to merge 21 commits into base: main

Changes from 14 commits

Commits (21)
ab8c29d
chore(backend): add new migrator service
detj Oct 11, 2024
1b1e6b5
chore(backend): update journal and add volume to migrator
detj Oct 15, 2024
cb85758
chore: merge branch 'main' into integrate-symbolicator
detj Jan 22, 2025
0b70276
chore: merge branch 'main' into integrate-symbolicator
detj Jan 27, 2025
989856f
chore: merge branch 'main' into integrate-symbolicator
detj Jan 27, 2025
39575f1
chore: merge branch 'main' into integrate-symbolicator
detj Jan 30, 2025
c6cf47a
chore(backend): modify put builds api
detj Feb 5, 2025
8060e00
test(backend): add dif test
detj Feb 5, 2025
351d37a
chore(backend): update sessionator record
detj Feb 5, 2025
07c1451
chore(backend): symbolicator wip
detj Feb 11, 2025
8d69ff8
chore(backend): add sleep functions to chrono
detj Feb 13, 2025
01641d5
chore(backend): add cache package
detj Feb 13, 2025
17e5a87
docs(backend): update codec package docs
detj Feb 13, 2025
fd4f13f
chore(backend): add support for multiple mapping files
detj Feb 20, 2025
bcb1c06
chore(backend): add code comments
detj Feb 20, 2025
8b0d6cd
chore(backend): support multiple mapping files in sessionator
detj Feb 23, 2025
d662018
chore(backend): support multi dsym ingest
detj Feb 24, 2025
75b08d4
chore(backend): add symbolicator origin in server config
detj Feb 24, 2025
4d764c8
chore(backend): remove unused code
detj Feb 24, 2025
7a0eeb1
test(backend): add test for lru cache
detj Feb 24, 2025
2b88811
chore(backend): modify events table to store binary images
detj Feb 24, 2025
76 changes: 76 additions & 0 deletions backend/api/cache/lru.go
@@ -0,0 +1,76 @@
package cache

import (
"container/list"
"sync"
)

// LRUCache represents a thread-safe
// LRU cache.
type LRUCache struct {
cap int
cache map[string]*list.Element
list *list.List
mu sync.Mutex
}

type entry struct {
key string
value any
}

// NewLRUCache initializes a new LRU cache
// with the given capacity.
func NewLRUCache(cap int) *LRUCache {
return &LRUCache{
cap: cap,
cache: make(map[string]*list.Element),
list: list.New(),
}
}

// Get retrieves a value from the cache
// and marks it as recently used.
func (c *LRUCache) Get(key string) (any, bool) {
c.mu.Lock()
defer c.mu.Unlock()

if elem, ok := c.cache[key]; ok {
c.list.MoveToFront(elem)
return elem.Value.(*entry).value, true
}

return nil, false
}

// Put inserts a value into the cache, evicting
// the least recently used item if necessary.
func (c *LRUCache) Put(key string, value any) {
c.mu.Lock()
defer c.mu.Unlock()

if elem, ok := c.cache[key]; ok {
c.list.MoveToFront(elem)
elem.Value.(*entry).value = value
return
}

if c.list.Len() >= c.cap {
c.evict()
}

e := &entry{key, value}
elem := c.list.PushFront(e)
c.cache[key] = elem
}

// evict removes the least recently
// used item from the cache.
func (c *LRUCache) evict() {
elem := c.list.Back()
if elem != nil {
c.list.Remove(elem)
kv := elem.Value.(*entry)
delete(c.cache, kv.key)
}
}
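
For reference, a minimal usage sketch of the new cache package. The import path backend/api/cache is inferred from the file's location in this diff; the keys, values, and capacity are illustrative only.

// Illustrative usage of the LRU cache added in this PR.
package main

import (
	"fmt"

	"backend/api/cache"
)

func main() {
	// capacity of 2 entries
	c := cache.NewLRUCache(2)

	c.Put("a", 1)
	c.Put("b", 2)
	c.Get("a")    // marks "a" as recently used
	c.Put("c", 3) // evicts "b", the least recently used entry

	if _, ok := c.Get("b"); !ok {
		fmt.Println("b was evicted")
	}
	if v, ok := c.Get("a"); ok {
		fmt.Println("a is still cached:", v)
	}
}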
14 changes: 14 additions & 0 deletions backend/api/chrono/chrono.go
@@ -3,6 +3,7 @@ package chrono
import (
"encoding/json"
"fmt"
"math/rand/v2"
"time"
)

@@ -28,3 +29,16 @@ func (i *ISOTime) Scan(src interface{}) error {
return fmt.Errorf("failed to convert to ISOTime type from %T", t)
}
}

// Sleep sleeps for d duration.
func Sleep(d time.Duration) {
time.Sleep(d)
}

// JitterySleep sleeps for d plus some
// randomly added jitter. Prefer this over
// Sleep to avoid thundering herd problems.
func JitterySleep(d time.Duration) {
jitter := time.Duration(rand.IntN(10)) * time.Second
Sleep(d + jitter)
}
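
As a usage sketch (not part of this PR): a hypothetical retry loop that uses chrono.JitterySleep so concurrent callers back off at slightly different times. The doWork function, the base delay, and the attempt count are made up for illustration.

// Hypothetical retry loop spreading retries with JitterySleep
// to avoid a thundering herd; doWork is a stand-in for real work.
package main

import (
	"errors"
	"time"

	"backend/api/chrono"
)

func doWork() error {
	return errors.New("transient failure")
}

func main() {
	const maxAttempts = 3
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err := doWork(); err == nil {
			return
		}
		// base delay of 2 seconds, plus 0-9 seconds of random jitter
		chrono.JitterySleep(2 * time.Second)
	}
}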
35 changes: 35 additions & 0 deletions backend/api/codec/codec.go
@@ -0,0 +1,35 @@
package codec

import (
"archive/tar"
"compress/gzip"
"errors"
"io"
)

// IsTarGz validates that the given reader
// contains a valid gzipped tarball.
func IsTarGz(file io.Reader) (err error) {
// check if gzip file
gzipReader, err := gzip.NewReader(file)
if err != nil {
if errors.Is(err, gzip.ErrHeader) {
err = errors.Join(errors.New("not a valid gzip file"), err)
return
}
err = errors.Join(errors.New("gzip check failed"), err)
return
}

defer gzipReader.Close()

// check if tar archive
tarReader := tar.NewReader(gzipReader)
_, err = tarReader.Next()
if err != nil {
err = errors.Join(errors.New("not a valid tar file"), err)
return
}

return
}
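
A small sketch of how a caller might use codec.IsTarGz to validate an upload before accepting it; the file name mapping.tgz is hypothetical.

// Illustrative check that an uploaded artifact is a gzipped tarball.
package main

import (
	"fmt"
	"os"

	"backend/api/codec"
)

func main() {
	f, err := os.Open("mapping.tgz")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	if err := codec.IsTarGz(f); err != nil {
		fmt.Println("rejecting upload:", err)
		return
	}
	fmt.Println("valid gzipped tarball")
}

Note that IsTarGz reads from the reader to perform the check, so a caller that needs the contents afterwards would have to seek back to the start or re-open the file.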
4 changes: 4 additions & 0 deletions backend/api/codec/doc.go
@@ -0,0 +1,4 @@
// Package codec provides helpers for
// encoding/decoding and inflating/deflating
// data.
package codec
39 changes: 34 additions & 5 deletions backend/api/event/event.go
@@ -320,11 +320,40 @@ type ANR struct {
}

type Exception struct {
Handled bool `json:"handled" binding:"required"`
Exceptions ExceptionUnits `json:"exceptions" binding:"required"`
Threads Threads `json:"threads" binding:"required"`
Fingerprint string `json:"fingerprint"`
Foreground bool `json:"foreground" binding:"required"`
Handled bool `json:"handled" binding:"required"`
Exceptions ExceptionUnits `json:"exceptions" binding:"required"`
Threads Threads `json:"threads" binding:"required"`
Fingerprint string `json:"fingerprint"`
Foreground bool `json:"foreground" binding:"required"`
BinaryImages []BinaryImage `json:"binary_images"`
}

// BinaryImage represents each binary image
// entry as appearing in an Apple crash
// report.
//
// Only applicable for Darwin apps.
type BinaryImage struct {
// StartAddr is the address where the binary
// is loaded in virtual memory.
StartAddr string `json:"start_addr" binding:"required"`
// EndAddr is the upper memory boundary of
// the binary.
EndAddr string `json:"end_addr" binding:"required"`
// System indicates whether this is a system binary.
System bool `json:"system" binding:"required"`
// Name is the name of the app, framework
// or library binary.
Name string `json:"name" binding:"required"`
// Arch is the CPU architecture the binary
// is compiled for.
Arch string `json:"arch" binding:"required"`
// Uuid is the unique fingerprint for
// the build of the binary.
Uuid string `json:"uuid" binding:"required"`
// Path is the path where the binary was
// located at runtime.
Path string `json:"path" binding:"required"`
}

// FingerprintComputer describes the behavior
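
To make the new binary_images wire format concrete, here is an illustrative sketch that builds a BinaryImage and prints its JSON encoding. Every field value is a placeholder rather than real crash-report data, and the import path backend/api/event is inferred from the diff's file location.

// Illustrative construction of a BinaryImage; the JSON keys follow
// the struct tags (start_addr, end_addr, system, name, arch, uuid, path).
package main

import (
	"encoding/json"
	"fmt"

	"backend/api/event"
)

func main() {
	img := event.BinaryImage{
		StartAddr: "0x104bb0000",
		EndAddr:   "0x104bd7fff",
		System:    false,
		Name:      "MyApp",
		Arch:      "arm64",
		Uuid:      "B8C3E1A2C0DE4F00A1B2C3D4E5F60718",
		Path:      "/private/var/containers/Bundle/Application/MyApp.app/MyApp",
	}
	b, _ := json.Marshal(img) // error ignored for brevity
	fmt.Println(string(b))
}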
6 changes: 3 additions & 3 deletions backend/api/event/frame.go
@@ -25,9 +25,6 @@ type FrameiOS struct {
SymbolAddress string `json:"symbol_address"`
// Offset is the byte offset.
Offset int `json:"offset"`
// InApp is `true` if the frame originates
// from the app module.
InApp bool `json:"in_app"`
}

type Frame struct {
@@ -43,6 +40,9 @@ type Frame struct {
ClassName string `json:"class_name"`
// MethodName is the name of the originating method.
MethodName string `json:"method_name"`
// InApp is `true` if the frame originates
// from the app module.
InApp bool `json:"in_app"`
FrameiOS
}

131 changes: 79 additions & 52 deletions backend/api/measure/event.go
@@ -9,7 +9,7 @@ import (
"backend/api/numeric"
"backend/api/server"
"backend/api/span"
"backend/api/symbol"
"backend/api/symbolicator"
"context"
"encoding/json"
"errors"
@@ -247,6 +247,13 @@ func (e *eventreq) read(c *gin.Context, appId uuid.UUID) error {
ev.HotLaunch.Compute()
}

// read platform from the payload
// if we haven't figured out the
// platform already.
if e.platform == "" {
e.platform = ev.Attribute.Platform
}

e.events = append(e.events, ev)
}

@@ -274,6 +281,13 @@ func (e *eventreq) read(c *gin.Context, appId uuid.UUID) error {
e.bumpSize(int64(len(bytes)))
sp.AppID = appId

// read platform from the payload
// if we haven't figured out the
// platform already.
if e.platform == "" {
e.platform = sp.Attributes.Platform
}

e.spans = append(e.spans, sp)
}

@@ -2170,61 +2184,74 @@ func PutEvents(c *gin.Context) {
}

if eventReq.needsSymbolication() {
// symbolicate
symbolicator, err := symbol.NewSymbolicator(&symbol.Options{
Origin: os.Getenv("SYMBOLICATOR_ORIGIN"),
Store: server.Server.PgPool,
})
if err != nil {
msg := `failed to initialize symbolicator`
fmt.Println(msg, err)
c.JSON(http.StatusInternalServerError, gin.H{
"error": msg,
"details": err.Error(),
})
return
}

origin := os.Getenv("SYMBOLICATOR_ORIGIN")
platform := eventReq.platform
source := symbolicator.NewS3Source("msr-symbols", server.Server.Config.SymbolsBucket, server.Server.Config.SymbolsBucketRegion, server.Server.Config.AWSEndpoint, server.Server.Config.SymbolsAccessKey, server.Server.Config.SymbolsSecretAccessKey)
symblctr := symbolicator.New(origin, platform, []symbolicator.Source{source})
events := eventReq.getSymbolicationEvents()

batches := symbolicator.Batch(events)

// start span to trace symbolication
symbolicationTracer := otel.Tracer("symbolication-tracer")
_, symbolicationSpan := symbolicationTracer.Start(ctx, "symbolicate-events")

defer symbolicationSpan.End()

for i := range batches {
// If symoblication fails for whole batch, continue
if err := symbolicator.Symbolicate(ctx, batches[i]); err != nil {
msg := `failed to symbolicate batch`
fmt.Println(msg, err)
continue
}

// If symbolication succeeds but has errors while decoding individual frames, log them and proceed
if len(batches[i].Errs) > 0 {
for _, err := range batches[i].Errs {
fmt.Println("symbolication err: ", err.Error())
}
}
symblctr.Symbolicate(events)
}

// rewrite symbolicated events to event request
for j := range batches[i].Events {
eventId := batches[i].Events[j].ID
idx, exists := eventReq.symbolicate[eventId]
if !exists {
fmt.Printf("event id %q not found in symbolicate cache, batch index: %d, event index: %d\n", eventId, i, j)
continue
}
eventReq.events[idx] = batches[i].Events[j]
delete(eventReq.symbolicate, eventId)
}
}
c.JSON(http.StatusOK, gin.H{"ok": "whatever"})
return

eventReq.bumpSymbolication()
}
// if eventReq.needsSymbolication() {
// // symbolicate
// symbolicator, err := symbol.NewSymbolicator(&symbol.Options{
// Origin: os.Getenv("SYMBOLICATOR_ORIGIN"),
// Store: server.Server.PgPool,
// })
// if err != nil {
// msg := `failed to initialize symbolicator`
// fmt.Println(msg, err)
// c.JSON(http.StatusInternalServerError, gin.H{
// "error": msg,
// "details": err.Error(),
// })
// return
// }

// events := eventReq.getSymbolicationEvents()

// batches := symbolicator.Batch(events)

// // start span to trace symbolication
// symbolicationTracer := otel.Tracer("symbolication-tracer")
// _, symbolicationSpan := symbolicationTracer.Start(ctx, "symbolicate-events")

// defer symbolicationSpan.End()

// for i := range batches {
// // If symbolication fails for the whole batch, continue
// if err := symbolicator.Symbolicate(ctx, batches[i]); err != nil {
// msg := `failed to symbolicate batch`
// fmt.Println(msg, err)
// continue
// }

// // If symbolication succeeds but has errors while decoding individual frames, log them and proceed
// if len(batches[i].Errs) > 0 {
// for _, err := range batches[i].Errs {
// fmt.Println("symbolication err: ", err.Error())
// }
// }

// // rewrite symbolicated events to event request
// for j := range batches[i].Events {
// eventId := batches[i].Events[j].ID
// idx, exists := eventReq.symbolicate[eventId]
// if !exists {
// fmt.Printf("event id %q not found in symbolicate cache, batch index: %d, event index: %d\n", eventId, i, j)
// continue
// }
// eventReq.events[idx] = batches[i].Events[j]
// delete(eventReq.symbolicate, eventId)
// }
// }

// eventReq.bumpSymbolication()
// }

if eventReq.hasAttachments() {
// start span to trace attachment uploads