Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Question: is it worth to store the reference to a map? #1006

Open
dgonzalezKentech opened this issue Nov 26, 2024 · 1 comment
Open

Question: is it worth to store the reference to a map? #1006

dgonzalezKentech opened this issue Nov 26, 2024 · 1 comment

Comments

@dgonzalezKentech
Copy link

Go version: 1.22.4
Hazelcast Go Client version: v1.4.2

Simple question:

we use HZ to store token information with some maps, not too many (5),

We do like 1000 req/s to 3 or 4 maps, but for every single request to the map.get we get the map first. (GetMap)

We could store the map reference in a "golang map" (as we have a generic interface with HZ) in our code instead of relying on the HZ function to get the map.

Would this be worth it? Is the GetMap doing a request to the server every single time? I checked the code of the library and seems to save some references internally...

`

   // Get looks up key in the Hazelcast map called mapName and returns the
   // stored value. The cache response-time metric is recorded only when the
   // lookup succeeds (err == nil at return time, observed via the named result).
   func (repo CacheRepo) Get(ctx context.Context, key, mapName string) (data interface{}, err error) {
   	logger.LOGContext(ctx).Debugf("[CacheRepo][Get][key:%s][mapName:%s]", key, mapName)

   	start := time.Now()
   	defer func() {
   		// Only successful gets feed the timer metric.
   		if err == nil {
   			tools.EndCacheResponseTimer(start, "get")
   		}
   	}()

   	// Resolve the map proxy for this request.
   	cacheMap, err := repo.Cache.GetMap(ctx, mapName)
   	if err != nil {
   		return nil, fmt.Errorf("[CacheRepo][Get][GetMap][mapName:%s][err:%w]", mapName, err)
   	}

   	data, err = cacheMap.Get(ctx, key)
   	if err != nil {
   		return nil, fmt.Errorf("[CacheRepo][Get][cacheMap.Get][mapName:%s][key:%s][err:%w]", mapName, key, err)
   	}

   	logger.LOGContext(ctx).Debugf("[CacheRepo][Get][data:%v]", data)
   	return data, nil
   }

`

something like this (pseudo-code):

map, ok := mymaps[mapName] if !ok { map = repo.Cache.GetMap(ctx, mapName) mymaps[mapName] = map }

Thank you

@dgonzalezKentech
Copy link
Author

I created a small program to test getting the map every time versus only once, and it is about 90% faster to only get it once.
Should this be the way to go — keeping a reference to the map? Or, if we do, might we break something else?

Standard scenario response time: 21.9712609s
Standard scenario processing rate: 455.14 ops/sec

Optimized scenario response time: 11.0851385s
Optimized scenario processing rate: 902.11 ops/sec

Standard scenario response time (parallel): 1.0739231s
Standard scenario processing rate (parallel): 9311.65 ops/sec

Optimized scenario response time (parallel): 320.1793ms
Optimized scenario processing rate (parallel): 31232.50 ops/sec

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hazelcast/hazelcast-go-client"
)

const itemSize = 10000

func main() {
	ctx := context.TODO()

	// Connect to the Hazelcast cluster.
	cfg := hazelcast.NewConfig()
	cfg.Cluster.Network.SetAddresses("10.21.98.61:30705")
	hzClient, err := hazelcast.StartNewClientWithConfig(ctx, cfg)
	if err != nil {
		fmt.Println("Failed to start Hazelcast client:", err)
		return
	}
	defer hzClient.Shutdown(ctx)

	// Sequential scenarios.
	stdDur := measureStandard(ctx, hzClient)
	optDur := measureOptimized(ctx, hzClient)
	//wrongDur := measureWrong(ctx, hzClient)

	fmt.Println("Standard scenario response time:", stdDur)
	fmt.Printf("Standard scenario processing rate: %.2f ops/sec\n\n", float64(itemSize)/stdDur.Seconds())

	fmt.Println("Optimized scenario response time:", optDur)
	fmt.Printf("Optimized scenario processing rate: %.2f ops/sec\n\n", float64(itemSize)/optDur.Seconds())

	//fmt.Println("Wrong scenario response time:", wrongDur)
	//fmt.Printf("Wrong scenario processing rate: %.2f ops/sec\n\n", float64(itemSize)/wrongDur.Seconds())

	// Parallel scenarios with a fixed-size worker pool.
	const workerCount = 50
	stdDurPar := measureStandardParallel(ctx, hzClient, workerCount)
	optDurPar := measureOptimizedParallel(ctx, hzClient, workerCount)
	//wrongDurPar := measureWrongParallel(ctx, hzClient, workerCount)

	fmt.Println("\nStandard scenario response time (parallel):", stdDurPar)
	fmt.Printf("Standard scenario processing rate (parallel): %.2f ops/sec\n\n", float64(itemSize)/stdDurPar.Seconds())

	fmt.Println("Optimized scenario response time (parallel):", optDurPar)
	fmt.Printf("Optimized scenario processing rate (parallel): %.2f ops/sec\n\n", float64(itemSize)/optDurPar.Seconds())

	//fmt.Println("Wrong scenario response time (parallel):", wrongDurPar)
	//fmt.Printf("Wrong scenario processing rate (parallel): %.2f ops/sec\n\n", float64(itemSize)/wrongDurPar.Seconds())
}

// measureStandard times itemSize lookups where the map proxy is re-fetched
// via client.GetMap on every iteration — the "GetMap per request" pattern.
// Errors are deliberately ignored; this is a throughput probe, not a
// correctness check.
func measureStandard(ctx context.Context, client *hazelcast.Client) time.Duration {
	begin := time.Now()
	for n := 0; n < itemSize; n++ {
		m, _ := client.GetMap(ctx, fmt.Sprintf("map%d", n))
		_, _ = m.Get(ctx, "key")
	}
	return time.Since(begin)
}

// measureOptimized times itemSize lookups against a single map proxy that is
// fetched once, before the clock starts. Errors are deliberately ignored.
func measureOptimized(ctx context.Context, client *hazelcast.Client) time.Duration {
	m, _ := client.GetMap(ctx, "myMap")
	begin := time.Now()
	for n := 0; n < itemSize; n++ {
		_, _ = m.Get(ctx, fmt.Sprintf("key%d", n))
	}
	return time.Since(begin)
}

// measureWrong is the inverted scenario: it uses the generated key name as the
// map name and a fixed "map" string as the key, fetching a fresh proxy each
// iteration. Kept for comparison; errors are deliberately ignored.
func measureWrong(ctx context.Context, client *hazelcast.Client) time.Duration {
	begin := time.Now()
	for n := 0; n < itemSize; n++ {
		m, _ := client.GetMap(ctx, fmt.Sprintf("key%d", n))
		_, _ = m.Get(ctx, "map")
	}
	return time.Since(begin)
}
// measureStandardParallel runs the "GetMap per request" scenario across a pool
// of workers goroutines. Completion is tracked by counting acknowledgements on
// the done channel; both channels are buffered to itemSize so no send blocks.
func measureStandardParallel(ctx context.Context, client *hazelcast.Client, workers int) time.Duration {
	begin := time.Now()
	jobs := make(chan int, itemSize)
	done := make(chan struct{}, itemSize)

	worker := func() {
		for n := range jobs {
			m, _ := client.GetMap(ctx, fmt.Sprintf("map%d", n))
			_, _ = m.Get(ctx, "key")
			done <- struct{}{}
		}
	}
	for w := 0; w < workers; w++ {
		go worker()
	}

	for n := 0; n < itemSize; n++ {
		jobs <- n
	}
	close(jobs)

	// Wait until every job has been acknowledged.
	for n := 0; n < itemSize; n++ {
		<-done
	}
	return time.Since(begin)
}

// measureOptimizedParallel runs the shared-proxy scenario across a pool of
// workers goroutines: the map proxy is fetched once (before timing starts) and
// reused by every worker. Completion is tracked via the done channel.
func measureOptimizedParallel(ctx context.Context, client *hazelcast.Client, workers int) time.Duration {
	m, _ := client.GetMap(ctx, "myMap")
	begin := time.Now()
	jobs := make(chan int, itemSize)
	done := make(chan struct{}, itemSize)

	worker := func() {
		for n := range jobs {
			_, _ = m.Get(ctx, fmt.Sprintf("key%d", n))
			done <- struct{}{}
		}
	}
	for w := 0; w < workers; w++ {
		go worker()
	}

	for n := 0; n < itemSize; n++ {
		jobs <- n
	}
	close(jobs)

	// Wait until every job has been acknowledged.
	for n := 0; n < itemSize; n++ {
		<-done
	}
	return time.Since(begin)
}

// measureWrongParallel runs the inverted scenario (key name used as map name)
// across a pool of workers goroutines, fetching a fresh proxy per job.
// Completion is tracked via the done channel; errors are deliberately ignored.
func measureWrongParallel(ctx context.Context, client *hazelcast.Client, workers int) time.Duration {
	begin := time.Now()
	jobs := make(chan int, itemSize)
	done := make(chan struct{}, itemSize)

	worker := func() {
		for n := range jobs {
			m, _ := client.GetMap(ctx, fmt.Sprintf("key%d", n))
			_, _ = m.Get(ctx, "map")
			done <- struct{}{}
		}
	}
	for w := 0; w < workers; w++ {
		go worker()
	}

	for n := 0; n < itemSize; n++ {
		jobs <- n
	}
	close(jobs)

	// Wait until every job has been acknowledged.
	for n := 0; n < itemSize; n++ {
		<-done
	}
	return time.Since(begin)
}

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant