System.Collections.Generic.List.Add(System.Threading.Tasks.Task)

Here are examples of the C# API System.Collections.Generic.List.Add(System.Threading.Tasks.Task), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

1813 Examples
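Most of the examples below follow the same fan-out pattern: start several asynchronous operations, add each returned Task to a List<Task> (or List<Task<TResult>>), and then wait on the whole batch with Task.WhenAll (or block on it with Task.WaitAll or by summing Results). The sketch below shows that pattern in isolation; it is not taken from any of the projects listed here, and the FetchLengthAsync helper and URLs are purely illustrative.

using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading.Tasks;

class ListAddTaskExample
{
    // Illustrative worker: any method that returns a Task can be collected the same way.
    static async Task<int> FetchLengthAsync(HttpClient client, string url)
    {
        var body = await client.GetStringAsync(url);
        return body.Length;
    }

    static async Task Main()
    {
        var urls = new[] { "https://example.com", "https://example.org" };
        using var client = new HttpClient();

        // Collect the in-flight tasks so they can be awaited together later.
        var tasks = new List<Task<int>>();
        foreach (var url in urls)
        {
            tasks.Add(FetchLengthAsync(client, url));
        }

        // Await the whole batch; WhenAll rethrows the first exception if any task failed.
        int[] lengths = await Task.WhenAll(tasks);
        Console.WriteLine(string.Join(", ", lengths));
    }
}

Awaiting Task.WhenAll is generally preferable to blocking on .Result or Task.WaitAll, although several of the desktop and test examples below block deliberately.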

19 Source : ManagerDatabase.cs
with Apache License 2.0
from aryice

public int DatabaseAttachAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            int revalue = 0;
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => AttachTask((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            revalue = listtask.Sum(m => m.Result);
            Console.WriteLine("执行完成,附加数据库:" + revalue);
            return revalue;
        }

19 Source : ManagerTable.cs
with Apache License 2.0
from aryice

public void TableAddIndexAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            int revalue = 0;
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => TableAddIndexServer((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            revalue = listtask.Sum(m => m.Result);
            Console.WriteLine("添加索引执行完成,影响服务器数:" + revalue);
        }

19 Source : ManagerTable.cs
with Apache License 2.0
from aryice

public void TableDelIndexAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            int revalue = 0;
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => TableDelIndexServer((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            revalue = listtask.Sum(m => m.Result);
            Console.WriteLine("删除索引执行完成,影响服务器数:" + revalue);
        }

19 Source : ManagerDatabase.cs
with Apache License 2.0
from aryice

public int DatabaseDetachAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            int revalue = 0;
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => DetachTask((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            revalue = listtask.Sum(m => m.Result);
            Console.WriteLine("执行完成,分离数据库:" + revalue);
            return revalue;
        }

19 Source : ManagerServer.cs
with Apache License 2.0
from aryice

public void ServerCacheClearAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => ServerCacheClearItem((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            int revalue = listtask.Sum(m => m.Result);
            Console.WriteLine("测试连接执行完成,操作服务器数:" + revalue);
        }

19 Source : ManagerServer.cs
with Apache License 2.0
from aryice

public void ServerConnectionCheckAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => ServerConnectionCheckItem((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            int revalue = listtask.Sum(m => m.Result);
            new ManagerConfig().SaveConfig(basemodel);
            Console.WriteLine("测试连接执行完成,操作服务器数:" + revalue);
        }

19 Source : ManagerTable.cs
with Apache License 2.0
from aryice

public void TableUseCheckAll(SqlBaseItemXml basemodel)
        {
            List<Task<int>> listtask = new List<Task<int>>();
            int revalue = 0;
            var serverlist = DBConfig.GetServerItemXmlConfigList(basemodel);
            foreach (var item in serverlist)
            {
                Task<int> t = new Task<int>(n => TableUseCheckServer((SqlServerItemXml)n), item);
                t.Start();
                listtask.Add(t);
            }
            revalue = listtask.Sum(m => m.Result);
            new ManagerConfig().SaveConfig(basemodel);
            Console.WriteLine("检查表,涉及服务器数:" + revalue);
        }

19 Source : DBTask.cs
with Apache License 2.0
from aryice

public long SyncTaskManagerSum(List<TaskDataParam> taskdata, int tasknum = 20, bool IsFreeCache=true)
        {
            List<Task<long>> listtask = new List<Task<long>>();
            runnumold = taskdata.Count;
            runnumcurrent = 0;
            while (runnumold > runnumcurrent)
            {
                listtask.Clear();
                if (IsFreeCache)
                {
                    var tempmodel = taskdata[runnumcurrent];
                    DBProxy.GetDBAccess(tempmodel.dbtype).ServerCacheClear(tempmodel.connstr);
                }
                for (int i = 1; (i < tasknum) && (runnumold > runnumcurrent); i++)
                {
                    Task<long> t = new Task<long>(n => SyncTaskSum((TaskDataParam)n), taskdata[runnumcurrent++]);
                    t.Start();
                    listtask.Add(t);
                }
                rowcount += listtask.Sum(m => m.Result);
            }
            Console.WriteLine("执行完成:" + rowcount);
            return rowcount;
        }

19 Source : BotModule.cs
with GNU Affero General Public License v3.0
from asmejkal

[Command("cleanup", "commands", "Cleans output of the bot's commands.", CommandFlags.DirectMessageAllow | CommandFlags.OwnerOnly)]
        [Parameter("Count", ParameterType.Int, ParameterFlags.Remainder, "number of commands to cleanup")]
        public async Task CleanupCommands(ICommand command)
        {
            var messages = await command.Message.Channel.GetMessagesAsync(500).ToListAsync();
            var tasks = new List<Task>();
            var count = 0;
            foreach (var m in messages.SelectMany(x => x).Where(x => x.Author.Id == _client.CurrentUser.Id))
            {
                if (++count > command["Count"].AsInt)
                    break;

                tasks.Add(m.DeleteAsync());
            }

            await Task.WhenAll(tasks);
        }

19 Source : Program.cs
with Apache License 2.0
from aspnet

public static void Main(string[] args)
        {
            Uri uri = new Uri("http://localhost:12345/small-immediate-syncwrite");
            HttpClient client = new HttpClient();
            List<Task> offloads = new List<Task>();

            for (int i = 0; i < 10; i++)
            {
                Task offload = Task.Run(async () =>
                    {
                        try
                        {
                            for (int j = 0; j < 100000; j++)
                            {
                                HttpResponseMessage response = await client.GetAsync(uri);
                                response.EnsureSuccessStatusCode();
                                response.Dispose();
                            }
                        }
                        catch (Exception ex)
                        {
                            Console.WriteLine(ex);
                        }
                    });
                offloads.Add(offload);
            }

            Task.WaitAll(offloads.ToArray());
        }

19 Source : StorageManager.cs
with Apache License 2.0
from aspnet

public async Task AddMessagesAsync(CloudQueue queue, IEnumerable<CloudQueueMessage> messages)
        {
            if (queue == null)
            {
                throw new ArgumentNullException(nameof(queue));
            }

            try
            {
                var addTasks = new List<Task>();
                foreach (var message in messages)
                {
                    var addTask = queue.AddMessageAsync(message);
                    addTasks.Add(addTask);
                }

                await Task.WhenAll(addTasks);
            }
            catch (Exception ex)
            {
                var errorMessage = GetStorageErrorMessage(ex);
                var statusCode = GetStorageStatusCode(ex);
                var message = string.Format(CultureInfo.CurrentCulture, AzureStorageResources.StorageManager_OperationFailed, statusCode, errorMessage);
                _logger.Error(message, ex);
            }
        }

19 Source : StorageManager.cs
with Apache License 2.0
from aspnet

public async Task DeleteMessagesAsync(CloudQueue queue, IEnumerable<CloudQueueMessage> messages)
        {
            if (queue == null)
            {
                throw new ArgumentNullException(nameof(queue));
            }

            try
            {
                var deleteTasks = new List<Task>();
                foreach (var message in messages)
                {
                    var deleteTask = queue.DeleteMessageAsync(message);
                    deleteTasks.Add(deleteTask);
                }

                await Task.WhenAll(deleteTasks);
            }
            catch (Exception ex)
            {
                var errorMessage = GetStorageErrorMessage(ex);
                var statusCode = GetStorageStatusCode(ex);
                var message = string.Format(CultureInfo.CurrentCulture, AzureStorageResources.StorageManager_OperationFailed, statusCode, errorMessage);
                _logger.Error(message, ex);
            }
        }

19 Source : AzureKeyVaultConfigBuilder.cs
with MIT License
from aspnet

public override ICollection<KeyValuePair<string, string>> GetAllValues(string prefix)
        {
            ConcurrentDictionary<string, string> d = new ConcurrentDictionary<string, string>(StringComparer.OrdinalIgnoreCase);
            List<Task> tasks = new List<Task>();

            foreach (string key in _allKeys)
            {
                if (key.StartsWith(prefix, StringComparison.OrdinalIgnoreCase))
                    tasks.Add(Task.Run(() => GetValueAsync(key).ContinueWith(t =>
                    {
                        // Azure Key Vault keys are case-insensitive, so there shouldn't be any races here.
                        // Include version information. It will get filtered out later before updating config.
                        KeyVaultSecret secret = t.Result;
                        if (secret != null)
                        {
                            string versionedKey = key + "/" + (secret.Properties?.Version ?? "0");
                            d[versionedKey] = secret.Value;
                        }
                    })));
            }
            Task.WhenAll(tasks).Wait();

            return d;
        }

19 Source : Program.cs
with Apache License 2.0
from asynkron

private static void RunBatchClient(int batchSize)
        {
            var identities = new ClusterIdentity[actorCount];
            for (var i = 0; i < actorCount; i++)
            {
                var id = "myactor" + i;
                identities[i] = ClusterIdentity.Create(id,"hello");
            }
            
            var logger = Log.CreateLogger(nameof(Program));

            _ = SafeTask.Run(async () => {
                    var cluster = await Configuration.SpawnClient();
                    var rnd = new Random();
                    var semaphore = new AsyncSemaphore(5);

                    while (true)
                    {
                        semaphore.Wait(() => RunBatch(rnd, cluster));
                    }
                }
            );

            async Task RunBatch(Random? rnd, Cluster cluster)
            {
                var requests = new List<Task>();

                try
                {
                    var ct = CancellationTokens.FromSeconds(20);

                    var ctx = cluster.System.Root.CreateBatchContext(batchSize,ct);
                    for (var i = 0; i < batchSize; i++)
                    {
                        var id = identities![rnd!.Next(0, actorCount)];
                        var request = SendRequest(cluster, id, ct, ctx);

                        requests.Add(request);
                    }

                    await Task.WhenAll(requests);
                }
                catch (Exception x)
                {
                    logger.LogError(x, "Error...");
                }
            }
        }

19 Source : ProtoHost.cs
with Apache License 2.0
from asynkron

private async Task RunRequestLoop()
        {
            await Task.Yield();

            var rnd = new Random();

            while (!_appLifetime.ApplicationStopping.IsCancellationRequested)
            {
                var tasks = new List<Task>();

                for (var i = 0; i < 1000; i++)
                {
                    var id = rnd.Next(0, 100000);
                    var t = _cluster.RequestAsync<int>($"abc{id}", "kind", 123, new CancellationTokenSource(20000).Token);
                    tasks.Add(t);
                }

                await Task.WhenAll(tasks);
            }
        }

19 Source : Program.cs
with Apache License 2.0
from asynkron

private static async Task RunKafkaConsumeLoop(Cluster cluster)
        {
            while (true)
            {
                var sw = Stopwatch.StartNew();
                //get the messages from Kafka or other log/queue
                var messages = GetBatchFromKafka();
                var tasks = new List<Task>();

                //forward each message to their actors
                foreach (var message in messages)
                {
                    object m = message.MessageCase switch
                    {
                        MyEnvelope.MessageOneofCase.SomeMessage      => message.SomeMessage,
                        MyEnvelope.MessageOneofCase.SomeOtherMessage => message.SomeOtherMessage,
                        _                                            => throw new ArgumentOutOfRangeException(nameof(message), "Unknown message case")
                    };

                    var task = cluster
                        .RequestAsync<Ack>(message.DeviceId, "device", m, CancellationTokens.FromSeconds(5));

                    tasks.Add(task);
                }

                //await responses from all actors
                await Task.WhenAll(tasks);
                //TODO: commit back to Kafka that all messages succeeded
                sw.Stop();
                var tps = 1000.0 / sw.Elapsed.TotalMilliseconds * tasks.Count;

                //show throughput, messages per second
                Console.WriteLine(tps.ToString("n0"));
            }
        }

19 Source : Program.cs
with Apache License 2.0
from asynkron

static async Task Main()
        {
            Log.SetLoggerFactory(LoggerFactory.Create(l =>
                    l.AddConsole().SetMinimumLevel(LogLevel.Information)
                )
            );
            Console.WriteLine("1) Run local");
            Console.WriteLine("2) Run remote");
            var runRemote = Console.ReadLine() == "2";
            
            Console.WriteLine("Subscriber Count, default 10");

            if (!int.TryParse(Console.ReadLine(), out var subscriberCount))
            {
                subscriberCount = 10;
            }
            
            var system = GetSystem();

            if (runRemote)
            {
                await RunMember(); //start the subscriber node
                
                await system
                    .Cluster()
                    .StartClientAsync();
            }
            else
            {
                await system
                    .Cluster()
                    .StartMemberAsync();
            }

            var props = Props.FromFunc(ctx => {
                    if (ctx.Message is SomeMessage s)
                    {
                 //       Console.Write(".");
                    }

                    return Task.CompletedTask;
                }
            );

            for (int j = 0; j < subscriberCount; j++)
            {
                var pid1 = system.Root.Spawn(props);
                //subscribe the pid to the my-topic
                await system.Cluster().Subscribe("my-topic", pid1);
            }

            //get hold of a producer that can send messages to the my-topic
            var p = system.Cluster().Producer("my-topic");

            Console.WriteLine("starting");

            var sw = Stopwatch.StartNew();
            var tasks = new List<Task>();

            for (var i = 0; i < 100; i++)
            {
                var t = p.ProduceAsync(new SomeMessage
                    {
                        Value = i,
                    }
                );
                tasks.Add(t);
            }

       
            await Task.WhenAll(tasks);
            tasks.Clear();
            
            sw.Restart();

            Console.WriteLine("Running...");
            var messageCount = 1_000_000;

            for (int i = 0; i < messageCount; i++)
            {
                tasks.Add(p.ProduceAsync(new SomeMessage
                        {
                            Value = i,
                        }
                    )
                );
            }

            await Task.WhenAll(tasks);
            sw.Stop();

            var tps = (messageCount * subscriberCount) / sw.ElapsedMilliseconds * 1000;
            Console.WriteLine($"Time {sw.Elapsed.TotalMilliseconds}");
            Console.WriteLine($"Messages per second {tps:N0}");
        }

19 Source : PartitionIdentityActor.cs
with Apache License 2.0
from asynkron

private async Task OnClusterTopologyInner(ClusterTopology msg, IContext context)
        {
       //     await _cluster.MemberList.TopologyConsensus(CancellationTokens.FromSeconds(5));
            var members = msg.Members.ToArray();
            _topologyHash = msg.TopologyHash;
            _rdv.UpdateMembers(members);

            //remove all identities we no longer own.
            _partitionLookup.Clear();

            var requests = new List<Task<IdentityHandoverResponse>>();
            var requestMsg = new IdentityHandoverRequest
            {
                TopologyHash = _topologyHash,
                Address = _myAddress
            };

            requestMsg.Members.AddRange(members);

            foreach (var member in members)
            {
                var activatorPid = PartitionManager.RemotePartitionPlacementActor(member.Address);
                var request =
                    GetIdentitiesForMember(context, activatorPid, requestMsg);
                requests.Add(request);
            }

            try
            {
                Logger.LogDebug("Requesting ownerships");

                //built in timeout on each request above
                var responses = await Task.WhenAll(requests);
                Logger.LogDebug("Got ownerships {EventId}", _topologyHash);

                foreach (var response in responses)
                {
                    foreach (var actor in response.Actors)
                    {
                        TakeOwnership(actor);

                        if (!_partitionLookup.ContainsKey(actor.ClusterIdentity))
                            Logger.LogError("Ownership bug, we should own {Identity}", actor.ClusterIdentity);
                        else
                            Logger.LogDebug("I have ownership of {Identity}", actor.ClusterIdentity);
                    }
                }
            }
            catch (Exception x)
            {
                Logger.LogError(x, "Failed to get idenreplacedies");
                throw;
            }
            
            var membersLookup = msg.Members.ToDictionary(m => m.Address, m => m);

            //scan through all id lookups and remove cases where the address is no longer part of cluster members
            foreach (var (actorId, pid) in _partitionLookup.ToArray())
            {
                if (!membersLookup.ContainsKey(pid.Address)) _partitionLookup.Remove(actorId);
            }
        }

19 Source : CustomLoadDemo.cs
with Apache License 2.0
from atteneder

async Task CustomDeferAgent() {
        // Recommended: Use a common defer agent across multiple GLTFast instances!
        
        // For a stable frame rate:
        IDeferAgent deferAgent = gameObject.AddComponent<TimeBudgetPerFrameDeferAgent>();
        // Or for faster loading:
        deferAgent = new UninterruptedDeferAgent();

        var tasks = new List<Task>();
        
        foreach( var url in manyUrls) {
#if GLTFAST_4_OR_NEWER
            var gltf = new GLTFast.GltfImport(null,deferAgent);
#else
            var gltf = new GLTFast.GLTFast(null,deferAgent);
#endif
            var task = gltf.Load(url).ContinueWith(
                t => {
                    if (t.Result) {
#if GLTFAST_4_OR_NEWER
                        gltf.InstantiateMainScene(transform);
                        for (int sceneId = 0; sceneId < gltf.sceneCount; sceneId++) {
                            gltf.InstantiateScene(transform, sceneId);
                        }
#else
                        gltf.InstantiateGltf(transform);
#endif
                    }
                },
                TaskScheduler.FromCurrentSynchronizationContext()
                );
            tasks.Add(task);
        }

        await Task.WhenAll(tasks);
    }

19 Source : SimultaneousMassLoader.cs
with Apache License 2.0
from atteneder

protected override async void MassLoadRoutine (SampleSet sampleSet) {

        stopWatch.StartTime();

        GLTFast.IDeferAgent deferAgent;
        if(strategy==Strategy.Fast) {
            deferAgent = new GLTFast.UninterruptedDeferAgent();
        } else {
            deferAgent = gameObject.AddComponent<GLTFast.TimeBudgetPerFrameDeferAgent>();
        }

        var loadTasks = new List<Task>(sampleSet.itemCount);

        if(local) {
            foreach(var item in sampleSet.GetItemsPrefixed()) {
                var loadTask = LoadIt(
#if LOCAL_LOADING
                    string.Format( "file://{0}", item.path)
#else
                    item.path
#endif
                    ,deferAgent
                );
                loadTasks.Add(loadTask);
                await deferAgent.BreakPoint();
            }
        } else {
            foreach(var item in sampleSet.GetItemsPrefixed(false)) {
                var loadTask = LoadIt(item.path,deferAgent);
                loadTasks.Add(loadTask);
                await deferAgent.BreakPoint();
            }
        }

        await Task.WhenAll(loadTasks);
        
        stopWatch.StopTime();
        Debug.LogFormat("Finished loading {1} glTFs in {0} milliseconds!",stopWatch.lastDuration,sampleSet.itemCount);

        var selectSet = GetComponent<SampleSetSelectGui>();
        selectSet.enabled = true;
    }

19 Source : DracoRuntimeTests.cs
with Apache License 2.0
from atteneder

async Task LoadBatch(int quantity, NativeArray<byte> data, bool requireNormals = false, bool requireTangents = false) {

            var tasks = new List<Task<Mesh>>(quantity);

            for (var i = 0; i < quantity; i++)
            {
                DracoMeshLoader dracoLoader = new DracoMeshLoader();
                var task = dracoLoader.ConvertDracoMeshToUnity(data,requireNormals,requireTangents);
                tasks.Add(task);
            }

            while (tasks.Count > 0) {
                var task = await Task.WhenAny(tasks);
                tasks.Remove(task);
                var mesh = await task;
                if (mesh == null) {
                    Debug.LogError("Loading mesh failed");
                }
                else {
                    if (requireNormals) {
                        var normals = mesh.normals;
                        Assert.Greater(normals.Length,0);
                    }
                    if (requireTangents) {
                        var tangents = mesh.tangents;
                        Assert.Greater(tangents.Length,0);
                    }
                }
            }
            await Task.Yield();
        }

19 Source : TaskQueue.cs
with GNU General Public License v3.0
from autodotua

public async void Start()
        {
            IsExcuting = true;
            List<Task> tasks = new List<Task>();
            // await Task.Run(() =>
            //{
            for (int i = 0; i < Configs.RefreshThreadCount; i++)
            {
                tasks.Add(Task.Factory.StartNew(Excute, TaskCreationOptions.LongRunning));
                //Using Task.Run caused stuttering, probably because thread pool threads were shared;
                //the previous approach froze the UI, probably for the same reason
                //tasks.Add(Task.Run(() => Excute()));
                await Task.Delay(500 / Configs.RefreshThreadCount);
            }
            //});
            await Task.WhenAll(tasks);
            TaskStopped?.Invoke(this, new EventArgs());
        }

19 Source : LoadTestService.cs
with Apache License 2.0
from AutomateThePlanet

private List<Task> CreateTestTasks(int numberOfProcesses, int pauseBetweenStartSeconds, Action testBody)
        {
            var loadTasks = new List<Task>();
            for (int i = 0; i < numberOfProcesses; i++)
            {
                if (pauseBetweenStartSeconds > 0)
                {
                    Thread.Sleep(pauseBetweenStartSeconds * 1000);
                }

                loadTasks.Add(Task.Factory.StartNew(testBody));
            }

            return loadTasks;
        }

19 Source : ExportableResultsCollector.cs
with MIT License
from autostep

protected override async ValueTask OnResultsReady(ILifetimeScope scope, RunContext ctxt, WorkingResultSet results, CancellationToken cancelToken)
        {
            var logger = scope.Resolve<ILogger<ExportableResultsCollector>>();
            var allTasks = new List<Task>();

            var exporters = scope.Resolve<IEnumerable<IResultsExporter>>();

            logger.LogDebug(ExportableResultsCollectorMessages.StartingResultExport);

            // Invoke our exporters.
            foreach (var exporter in exporters)
            {
                allTasks.Add(InvokeExporter(exporter, scope, ctxt, results, cancelToken));
            }

            if (allTasks.Count == 0)
            {
                logger.LogWarning(ExportableResultsCollectorMessages.NoResultExporters);
            }
            else
            {
                await Task.WhenAll(allTasks).ConfigureAwait(false);

                logger.LogDebug(ExportableResultsCollectorMessages.EndResultExport);
            }
        }

19 Source : LatencyService.cs
with Apache License 2.0
from aws

public async Task<GetLatenciesResponse> GetLatencies(GetLatenciesRequest request)
        {
            try
            {
                if (request == null || request.Regions == null || !request.Regions.Any())
                {
                    return Response.Fail(new GetLatenciesResponse
                    {
                        ErrorCode = ErrorCode.InvalidParameters
                    });
                }

                var regionLatencyMap = new Dictionary<string, long>();

                var regionLatencyCalculationTasks = new List<Task>();

                foreach (string region in request.Regions)
                {
                    regionLatencyCalculationTasks.Add(
                            CalculateLatencyForRegion(regionLatencyMap, region)
                        );
                }

                await Task.WhenAll(regionLatencyCalculationTasks);

                return Response.Ok(new GetLatenciesResponse
                {
                    RegionLatencies = regionLatencyMap
                });
            }
            catch (Exception ex)
            {
                Logger.LogError(ex, ex.Message);

                return Response.Fail(new GetLatenciesResponse
                {
                    ErrorCode = ErrorCode.UnknownError,
                    ErrorMessage = ex.Message
                });
            }
        }

19 Source : LatencyService.cs
with Apache License 2.0
from aws

private List<Task<PingResult>> PingAddress(string address, int pingCount)
        {
            var pingReplyTasks = new List<Task<PingResult>>();

            for (int counter = 0; counter < pingCount; counter++)
            {
                pingReplyTasks.Add(_pingWrapper.SendPingAsync(address));
            }

            return pingReplyTasks;
        }

19 Source : ConfigurtaionBuilderIntegrationTestFixture.cs
with Apache License 2.0
from aws

private void seedTestData()
        {
            bool success = false;
            using (var client = AWSOptions.CreateServiceClient<IAmazonSimpleSystemsManagement>())
            {
                var tasks = new List<Task>();
                foreach (var kv in TestData)
                {
                    Console.WriteLine($"Adding parameter: ({ParameterPrefix + kv.Key}, {kv.Value})");
                    tasks.Add(client.PutParameterAsync(new PutParameterRequest
                    {
                        Name = ParameterPrefix + kv.Key,
                        Value = kv.Value,
                        Type = ParameterType.String
                    }));
                };
                Task.WaitAll(tasks.ToArray());

                // due to eventual consistency, wait for 5 sec increments for 3 times to verify
                // test data is correctly set before executing tests.
                const int tries = 3;
                for (int i = 0; i < tries; i++)
                {
                    int count = 0;
                    GetParametersByPathResponse response;
                    do
                    {
                        response = client.GetParametersByPathAsync(new GetParametersByPathRequest
                        {
                            Path = ParameterPrefix
                        }).Result;

                        count += response.Parameters.Count;
                    } while (!string.IsNullOrEmpty(response.NextToken));

                    success = (count == TestData.Count);

                    if (success)
                    {
                        Console.WriteLine("Verified that test data is available.");
                        break;
                    }
                    else
                    {
                        Console.WriteLine($"Waiting on test data to be available. Waiting {count + 1}/{tries}");
                        Thread.Sleep(5 * 1000);
                    }
                }
            }

            if (!success) throw new Exception("Failed to seed integration test data");
        }

19 Source : UtilsTest.cs
with Apache License 2.0
from aws

[Test]
        public void GenerateUniqueFileName_Maintains_Uniqueness_Across_Concurrent_Processes()
        {
            var filePath = "MyFile.json";
            var mutexName = "mutex";
            var tasks = new List<Task>();
            var fileNames = new List<string>();

            for (int i = 0; i < 100; i++)
            {
                tasks.Add(Task.Run(() => {
                    fileNames.Add(Utils.GenerateUniqueFileName(filePath, mutexName));
                }));
            }
            var t = Task.WhenAll(tasks);
            t.Wait();

            Assert.True(t.Status == TaskStatus.RanToCompletion);
            CollectionAssert.AllItemsAreUnique(fileNames);
        }

19 Source : UtilsTest.cs
with Apache License 2.0
from aws

[Test]
        public void ThreadSafeExportStringToFile_Permits_Concurrent_Processes()
        {
            var filePath = Path.Combine(TempDir, "test.txt");
            var content = "Test file content";
            var tasks = new List<Task>();
            var filePaths = new List<string>();
            for (int i = 0; i < 10; i++)
            {
                tasks.Add(Task.Run(() => {
                    var writtenFilePath = Utils.ThreadSafeExportStringToFile(filePath, content);
                    filePaths.Add(writtenFilePath);
                }));
            }
            var t = Task.WhenAll(tasks);
            t.Wait();

            Assert.True(t.Status == TaskStatus.RanToCompletion);

            // Since concurrent file access is permitted, there should be no naming conflicts,
            // and therefore no files should be renamed
            Assert.True(filePaths.All(f => f.Equals(filePath)));

            // Confirm that text was written to file
            Assert.AreEqual(content, File.ReadAllText(filePath));
        }

19 Source : ReportExporter.cs
with Apache License 2.0
from aws

public bool GenerateJsonReport(
            SolutionAnalysisResult solutionAnalysisResult,
            string outputFolder)
        {
            try
            {
                string SolutionName = solutionAnalysisResult.SolutionDetails.SolutionName;
                string BaseDir = Path.Combine(outputFolder, SolutionName + AnalyzeRootFolder, SolutionAnalyzeFolder);
                Dictionary<string, string> FailedProjects = new Dictionary<string, string>();

                solutionAnalysisResult.ProjectAnalysisResults.ForEach(projectAnalysResult =>
                {
                    if (projectAnalysResult == null)
                    {
                        return;
                    }
                    List<Task<bool>> writeToFiles = new List<Task<bool>>();
                    string ProjectName = projectAnalysResult.ProjectName;
                    string FileDir = Path.Combine(BaseDir, ProjectName);
                    Directory.CreateDirectory(FileDir);
                    List<PackagereplacedysisResult> packagereplacedysisResults = new List<PackagereplacedysisResult>();
                    Dictionary<PackageVersionPair, string> packagereplacedysisResultErrors = new Dictionary<PackageVersionPair, string>();

                    projectAnalysResult.PackageAnalysisResults.ToList()
                    .ForEach(p =>
                    {
                        if (p.Value.IsCompletedSuccessfully)
                        {
                            packageAnalysisResults.Add(p.Value.Result);
                        }
                        else
                        {
                            packageAnalysisResultErrors.Add(p.Key, p.Value.Exception.Message);
                        };
                    });

                    //project apis analysis result
                    string ApiAnalyzeFileName = ProjectName + "-api-analysis.json";
                    var projectApiAnalysisResult = projectAnalysResult.IsBuildFailed ? new ProjectApiAnalysisResult
                    {
                        Errors = new List<string> { $"Errors during compilation in {projectAnalysResult.ProjectName}." },
                        SchemaVersion = Common.Model.Schema.version,
                        SolutionFile = SolutionName,
                        SolutionGuid = solutionAnalysisResult.SolutionDetails.SolutionGuid,
                        ApplicationGuid = solutionAnalysisResult.SolutionDetails.ApplicationGuid,
                        RepositoryUrl = solutionAnalysisResult.SolutionDetails.RepositoryUrl,
                        ProjectFile = ProjectName,
                    } : new ProjectApiAnalysisResult
                    {
                        Errors = projectAnalysResult.Errors,
                        SchemaVersion = Common.Model.Schema.version,
                        SolutionFile = SolutionName,
                        SolutionGuid = solutionAnalysisResult.SolutionDetails.SolutionGuid,
                        ApplicationGuid = solutionAnalysisResult.SolutionDetails.ApplicationGuid,
                        RepositoryUrl = solutionAnalysisResult.SolutionDetails.RepositoryUrl,
                        ProjectFile = ProjectName,
                        SourceFileAnalysisResults = projectAnalysResult.SourceFileAnalysisResults
                    };
                    writeToFiles.Add(WriteReportToFileAsync(projectApiAnalysisResult, Path.Combine(FileDir, ApiAnalyzeFileName)));

                    //project packages analysis result
                    string PackageAnalyzeFileName = ProjectName + "-package-analysis.json";
                    writeToFiles.Add(WriteReportToFileAsync(packageAnalysisResults, Path.Combine(FileDir, PackageAnalyzeFileName)));

                    //project failed packages result
                    if (packageAnalysisResultErrors != null && packageAnalysisResultErrors.Count != 0)
                    {
                        string PackageAnalyzeErrorFileName = ProjectName + "-package-analysis-error.json";
                        writeToFiles.Add(WriteReportToFileAsync(packageAnalysisResults, Path.Combine(FileDir, PackageAnalyzeErrorFileName)));
                    }
                    Task.WaitAll(writeToFiles.ToArray());

                });
                if (FailedProjects?.Count != 0)
                {
                    WriteReportToFileAsync(FailedProjects, Path.Combine(BaseDir, "failed.json")).Wait();
                }
                return true;

            }
            catch (Exception ex)
            {
                _logger.LogError("failed to generate replacedyze report: {0}", ex);
                return false;
            }
        }

19 Source : UtilityTest.cs
with Apache License 2.0
from awslabs

[Fact]
        public async Task TestThreadSafeRandom()
        {
            const int taskCount = 1000;
            var semaphore = new SemaphoreSlim(0, taskCount);
            var cts = new CancellationTokenSource();
            var tasks = new List<Task>();
            for (var i = 0; i < taskCount; i++)
            {
                tasks.Add(MyThread(semaphore, cts.Token));
            }

            semaphore.Release(taskCount);

            await Task.Delay(20 * 1000);
            cts.Cancel();

            var newRandoms = new List<double>();
            for (var i = 0; i < 10; i++)
            {
                newRandoms.Add(Utility.Random.NextDouble());
            }

            // if half of the newly generated randoms are '0', the test has failed
            Assert.True(newRandoms.Count(d => d == 0) < 5);
        }

19 Source : ParallelExtensions.cs
with MIT License
from azist

public static void ParallelProcessVolumeBatchesStartingAt<TReader>(this IEnumerable<Stream> dataSource,
                                                              Security.ICryptoManager crypto,
                                                              long startPageId,
                                                              Func<IVolume, TReader> readerFactory,
                                                              Action<Page, TReader, Func<bool>> body,
                                                              Func<bool> cancel = null,
                                                              IPageCache pageCache = null,
                                                              Func<Security.ICryptoManager, IPageCache, Stream, IVolume> volumeFactory = null,
                                                              bool skipCorruptPages = false) where TReader : ArchiveReader
    {
      dataSource.NonNull(nameof(dataSource));
      crypto.NonNull(nameof(crypto));
      readerFactory.NonNull(nameof(readerFactory));
      body.NonNull(nameof(body));

      if (volumeFactory == null)
      {
        volumeFactory = (_1, _2, vstream) => new DefaultVolume(crypto, pageCache, vstream, ownsStream: false);
      }

      var readers = new List<TReader>();
      try
      {
        //mount all volumes
        foreach(var stream in dataSource)
        {
          if (stream==null || !stream.CanRead) continue;
          var volume = volumeFactory(crypto, pageCache, stream);
          var reader = readerFactory(volume);
          readers.Add(reader);
        }

        readers.IsTrue( _ => readers.Count > 0, "dataSource non empty");

        var main = readers[0];

        foreach(var pageSet in main.Volume.ReadPageInfos(startPageId).BatchBy(readers.Count))
        {
          if (cancel != null && cancel()) break;

          var tasks = new List<Task>();

          foreach(var pair in pageSet.Select((pi, i) => new KeyValuePair<long, TReader>(pi.PageId, readers[i % readers.Count])))
          {
            if (cancel != null && cancel()) break;

            tasks.Add(Task.Factory.StartNew(objKvp =>
            {
              var kvp = (KeyValuePair<long, TReader>)objKvp;
              var reader = kvp.Value;
              Page page = null;
              try
              {
                page = reader.GetOnePageAt(kvp.Key, exactPageId: true);
              }
              catch
              {
                if (!skipCorruptPages) throw;
              }

              if (page != null)
              {
                try
                {
                  body(page, reader, cancel);
                }
                finally
                {
                  reader.Recycle(page);
                }
              }
            }, pair));//Task
          }

          Task.WaitAll(tasks.ToArray());
        }
      }
      finally
      {
        readers.ForEach(r => r.Volume.Dispose());
      }
    }

19 Source : TodoQueueService.Processing.cs
with MIT License
from azist

private void processOneQueueBatch(TodoQueue queue, IEnumerable<TodoFrame> batch, DateTime utcNow)
      {
        if (queue.Mode == TodoQueue.ExecuteMode.Sequential)
        {
          batch.OrderBy(t => t.StartDate).ForEach(todo => executeOne(queue, todo, utcNow));
        }
        else if (queue.Mode == TodoQueue.ExecuteMode.Parallel)
        {
          Parallel.ForEach(batch.OrderBy(t => t.StartDate), todo => executeOne(queue, todo, utcNow));
        }
        else//ParallelByKey
        {
          var tasks = new List<Task>();

          var parallelTodos = batch.Where(t => t.ParallelKey == null).ToArray();
          if (parallelTodos.Length > 0)
            tasks.Add(Task.Factory.StartNew(ts => Parallel.ForEach(((IEnumerable<TodoFrame>)ts).OrderBy(t => t.StartDate), todo => executeOne(queue, todo, utcNow)), parallelTodos));

          List<TodoFrame> todos = null;
          string parallelKey = null;
          foreach (var todo in batch.Where(t => t.ParallelKey != null).OrderBy(t => t.ParallelKey))
          {
            if (parallelKey != todo.ParallelKey)
            {
              if (todos != null)
                tasks.Add(Task.Factory.StartNew(ts => ((IEnumerable<TodoFrame>)ts).OrderBy(t => t.StartDate).ForEach(t => executeOne(queue, t, utcNow)), todos));
              todos = new List<TodoFrame>();
              parallelKey = todo.ParallelKey;
            }
            todos.Add(todo);
          }

          if (todos != null)
            tasks.Add(Task.Factory.StartNew(ts => ((IEnumerable<TodoFrame>)ts).OrderBy(t => t.StartDate).ForEach(t => executeOne(queue, t, utcNow)), todos));

          Task.WaitAll(tasks.ToArray());
        }
      }

19 Source : PileForm.cs
with MIT License
from azist

private void btnPersonParaPut_Click(object sender, EventArgs e)
    {
      var cnt = tbPersonCount.Text.AsInt(10);
      var threads = tbPersonThreads.Text.AsInt(1);

      var tasks = new List<Task>();

      var w = Stopwatch.StartNew();

      for(var c=0;c<threads;c++)
        tasks.Add(Task.Factory.StartNew(()=>
        {
          for(var i=0; i<cnt;i++)
          {
            var obj = Person.MakeFake();
            var pp = m_Pile.Put( obj );
          }
        }));

      Task.WaitAll( tasks.ToArray());


      var elps = w.ElapsedMilliseconds;
      var total = cnt*threads;
      Text = "Added {0:n0} in {1:n0}ms at {2:n0}/sec".Args(total, elps, total /(elps/1000d));
    }

19 Source : MySQLDetailedTests.cs
with MIT License
from azist

[Run]
    public async Task ManyRowsInsertAndReadASYNC2()
    {
      using (var ds = makeDataStore())
      {
        var patients = new List<Patient>();
        var pending = new List<Task>();
        for (var i = 1; i < 2_000; i++)
        {
          var row = makePatient("Ivanov" + i);
          row.COUNTER = i;
          patients.Add(row);
          pending.Add(ds.InsertAsync(row));//note: No await here
        }

        await Task.WhenAll(pending);

        var qry = new Query<Patient>("CRUD.Queries.Patient.List") { new Query.Param("LN", "Ivanov%") };
        var result = (await ds.LoadEnumerableAsync(qry)).OrderBy(p => p.COUNTER);
        Aver.IsTrue(patients.Select(p => p.Last_Name).SequenceEqual(result.Select(r => r.Last_Name)));
      }
    }

19 Source : CacheFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  deleteFreq=3  isParallel=true")]
    [Run("speed=false  durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  deleteFreq=3  isParallel=true")]
    public void DeleteOne_TwoTables_ByteArray(bool speed, int durationSec, int payloadSizeMin, int payloadSizeMax, int deleteFreq, bool isParallel)
    {
      using (var cache = new LocalCache(NOPApplication.Instance))
      using (var pile = new DefaultPile(cache))
      {
        cache.Pile = pile;
        cache.PileAllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        cache.Start();

        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var i = 0;
              var list = new List<Tuple<int, GDID, int, byte, byte>>();
              var tA = cache.GetOrCreateTable<GDID>("A");
              var tB = cache.GetOrCreateTable<GDID>("B");
              var wlc = 0;

              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                var val = new byte[payloadSize];
                val[0] = (byte)Ambient.Random.NextRandomInteger;
                val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                var tableId = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                var table = tableId == 0 ? tA : tB;
                var key = new GDID((uint)Thread.CurrentThread.ManagedThreadId, (ulong)i);

                table.Put(key, val);

                list.Add(new Tuple<int, GDID, int, byte, byte>(tableId, key, payloadSize - 1, val[0], val[payloadSize - 1]));

                // delete ONE random element
                if (i > 0 && i % deleteFreq == 0)
                {
                  while (true && list.Count > 0)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    var element = list[idx];

                    table = element.Item1 == 0 ? tA : tB;
                    key = element.Item2;

                    var removed = table.Remove(key);
                    list.RemoveAt(idx);

                    if (removed)
                      break;
                  }
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead));
                  for (var j = 0; j < toRead && list.Count > 0; j++)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    var element = list[idx];
                    table = element.Item1 == 0 ? tA : tB;
                    var buf = table.Get(element.Item2) as byte[];
                    if (buf == null)
                    {
                      list.RemoveAt(idx);
                      continue;
                    }
                    Aver.AreEqual(element.Item4, buf[0]);
                    Aver.AreEqual(element.Item5, buf[element.Item3]);
                  }

                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<Tuple<int, GDID, int, byte, byte>>();
              }

              Console.WriteLine("Thread {0} is doing final read of {1} elements"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count));
              foreach (var element in list)
              {
                var table = element.Item1 == 0 ? tA : tB;
                var buf = table.Get(element.Item2) as byte[];
                if (buf == null)
                  continue;
                Aver.AreEqual(element.Item4, buf[0]);
                Aver.AreEqual(element.Item5, buf[element.Item3]);
              }
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  deleteFreq=3  isParallel=true")]
    [Run("speed=false  durationSec=30  deleteFreq=3  isParallel=true")]
    public static void DeleteOne_TRow(bool speed, int durationSec, int deleteFreq, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckTRow>();
              var i = 0;
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var val = PersonRow.MakeFake(new GDID(0, (ulong)i));

                var ptr = pile.Put(val);

                var element = new CheckTRow(ptr, val.ID, val.Address1);
                list.Add(element);

                // delete ONE random element
                if (i > 0 && i % deleteFreq == 0)
                {
                  var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                  ptr = list[idx].Ptr;
                  pile.Delete(ptr);
                  list.RemoveAt(idx);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var k = 0; k < toRead; k++)
                  {
                    element = list[Ambient.Random.NextScaledRandomInteger(0, list.Count - 1)];
                    var buf = pile.Get(element.Ptr) as PersonRow;
                    Aver.IsTrue(element.Id.Equals(buf.ID));
                    Aver.IsTrue(element.Address.Equals(buf.Address1));
                  }
                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckTRow>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, ObjectCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, pile.ObjectCount));
              foreach (var element in list)
              {
                var buf = pile.Get(element.Ptr) as PersonRow;
                Aver.IsTrue(element.Id.Equals(buf.ID));
                Aver.IsTrue(element.Address.Equals(buf.Address1));
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  putMin=100  putMax=200  delFactor=4  isParallel=true")]
    [Run("speed=false  durationSec=30  putMin=100  putMax=200  delFactor=4  isParallel=true")]
    public static void DeleteSeveral_TRow(bool speed, int durationSec, int putMin, int putMax, int delFactor, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckTRow>();
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var putCount = Ambient.Random.NextScaledRandomInteger(putMin, putMax);
                for (int i = 0; i < putCount; i++)
                {
                  var val = PersonRow.MakeFake(new GDID());
                  var ptr = pile.Put(val);
                  list.Add(new CheckTRow(ptr, val.ID, val.Address1));
                }

                // delete several random elements
                int delCount = putCount / delFactor;
                for (int i = 0; i < delCount; i++)
                {
                  var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                  var ptr = list[idx].Ptr;
                  pile.Delete(ptr);
                  list.RemoveAt(idx);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var k = 0; k < toRead; k++)
                  {
                    var element = list[Ambient.Random.NextScaledRandomInteger(0, list.Count - 1)];
                    var buf = pile.Get(element.Ptr) as PersonRow;
                    Aver.IsTrue(element.Id.Equals(buf.ID));
                    Aver.IsTrue(element.Address.Equals(buf.Address1));
                  }
                }
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, objectCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, pile.ObjectCount));
              foreach (var element in list)
              {
                var buf = pile.Get(element.Ptr) as PersonRow;
                Aver.IsTrue(element.Id.Equals(buf.ID));
                Aver.IsTrue(element.Address.Equals(buf.Address1));
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("cnt=100000  durationSec=30  speed=true   payloadSizeMin=2  payloadSizeMax=8000  deleteFreq=3   isParallel=true")]
    [Run("cnt=100000  durationSec=30  speed=false  payloadSizeMin=2  payloadSizeMax=200   deleteFreq=10  isParallel=true")]
    public static void Put_RandomDelete_ByteArray(int cnt, int durationSec, bool speed, int payloadSizeMin, int payloadSizeMax, int deleteFreq, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) return;

                var dict = new Dictionary<int, CheckByteArray>();


                Console.WriteLine("Starting a batch of {0}".Args(cnt));
                for (int i = 0; i < cnt; i++)
                {
                  var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                  var val = new byte[payloadSize];
                  val[0] = (byte)Ambient.Random.NextRandomInteger;
                  val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                  var ptr = pile.Put(val);

                  var element = new CheckByteArray(ptr, payloadSize - 1, val[0], val[payloadSize - 1]);
                  dict.Add(i, element);

                  if (dict.Count > 0 && i % deleteFreq == 0)
                  {
                    while (true)
                    {
                      var idx = i - Ambient.Random.NextScaledRandomInteger(0, i);

                      CheckByteArray stored;
                      if (dict.TryGetValue(idx, out stored))
                      {
                        ptr = stored.Ptr;
                        pile.Delete(ptr);
                        dict.Remove(idx);
                        break;
                      }
                    }
                  }

                  if (dict.Count > 16 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                  {
                    var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                    wlc++;
                    if (wlc % 125 == 0)
                      Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                        .Args(Thread.CurrentThread.ManagedThreadId, toRead, dict.Count));
                    for (var k = 0; k < toRead; k++)
                    {
                      var kvp = dict.Skip(Ambient.Random.NextScaledRandomInteger(0, dict.Count - 1)).First();
                      var buf = pile.Get(kvp.Value.Ptr) as byte[];
                      Aver.AreEqual(kvp.Value.FirstByte, buf[0]);
                      Aver.AreEqual(kvp.Value.LastByte, buf[kvp.Value.IdxLast]);
                    }
                  }
                }

                Console.WriteLine("Thread {0} is doing final read of {1} elements".Args(Thread.CurrentThread.ManagedThreadId, dict.Count));
                foreach (var kvp in dict)
                {
                  var buf = pile.Get(kvp.Value.Ptr) as byte[];
                  Aver.AreEqual(kvp.Value.FirstByte, buf[0]);
                  Aver.AreEqual(kvp.Value.LastByte, buf[kvp.Value.IdxLast]);
                }
              }
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }
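
Here the stop condition is the same wall-clock check against a shared startTime. If cooperative cancellation is acceptable (an assumption, not what the original test uses), an equivalent shape is to let a CancellationTokenSource with a deadline end the workers:

using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

class DeadlineSketch
{
    static void Main()
    {
        using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)))   // fires after the test duration
        {
            var tasks = new List<Task>();
            for (var t = 0; t < Math.Max(1, Environment.ProcessorCount - 1); t++)
                tasks.Add(Task.Factory.StartNew(() =>
                {
                    long iterations = 0;
                    while (!cts.Token.IsCancellationRequested)   // replaces the (UtcNow - startTime) >= durationSec check
                        iterations++;                            // hypothetical unit of work
                    Console.WriteLine("stopped after {0:n0} iterations", iterations);
                }, TaskCreationOptions.LongRunning));

            Task.WaitAll(tasks.ToArray());
        }
    }
}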

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  putMin=100  putMax=200  delFactor=4  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    [Run("speed=false  durationSec=30  putMin=100  putMax=200  delFactor=4  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    public static void DeleteSeveral_ByteArray(bool speed, int durationSec, int putMin, int putMax, int delFactor, int payloadSizeMin, int payloadSizeMax, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckByteArray>();
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var putCount = Ambient.Random.NextScaledRandomInteger(putMin, putMax);
                for (int i = 0; i < putCount; i++)
                {
                  var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                  var val = new byte[payloadSize];
                  val[0] = (byte)Ambient.Random.NextRandomInteger;
                  val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                  var ptr = pile.Put(val);

                  list.Add(new CheckByteArray(ptr, payloadSize - 1, val[0], val[payloadSize - 1]));
                }

                int delCount = putCount / delFactor;
                for (int i = 0; i < delCount; i++)
                {
                  var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                  var ptr = list[idx].Ptr;
                  pile.Delete(ptr);
                  list.RemoveAt(idx);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var k = 0; k < toRead; k++)
                  {
                    var element = list[Ambient.Random.NextScaledRandomInteger(0, list.Count - 1)];
                    var buf = pile.Get(element.Ptr) as byte[];
                    Aver.AreEqual(element.FirstByte, buf[0]);
                    Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
                  }
                }

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckByteArray>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, objectCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, pile.ObjectCount));
              foreach (var element in list)
              {
                var buf = pile.Get(element.Ptr) as byte[];
                Aver.AreEqual(element.FirstByte, buf[0]);
                Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
              }
              return;
            }));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : FIDTests.cs
with MIT License
from azist

[Run("CNT=10000  tCNT= 4   ")]
    [Run("CNT=10000  tCNT= 4   ")]
    [Run("CNT=40000  tCNT= 4   ")]
    [Run("CNT=10000  tCNT= 10  ")]
    [Run("CNT=10000  tCNT= 10  ")]
    [Run("CNT=40000  tCNT= 10  ")]
    [Run("CNT=10000  tCNT= 100 ")]
    [Run("CNT=10000  tCNT= 100 ")]
    [Run("CNT=40000  tCNT= 100 ")]

    [Run("CNT=250000  tCNT= 4 ")]
    [Run("CNT=250000  tCNT= 2 ")]

    // re-execute the same test many times
    [Run("CNT=8000   tCNT=101 ")]
    [Run("CNT=3799   tCNT=102 ")]
    [Run("CNT=8000   tCNT=25 ")]
    [Run("CNT=16000  tCNT=25 ")]
    [Run("CNT=250000  tCNT=25 ")]
    public void FID4(int CNT, int tCNT)
    {
      var tasks = new List<Task>();
      var sets = new List<FID>();
      var bag = new ConcurrentBag<FID[]>();

      for (var c = 0; c < tCNT; c++)
      {
        tasks.Add(Task.Factory.StartNew(() =>
       {
         var set = new FID[CNT];

         for (var i = 0; i < CNT; i++)
         {
           set[i] = FID.Generate();
         }

         bag.Add(set);
       }));
      }

      Task.WaitAll(tasks.ToArray());

      foreach (var set in bag)
        sets.AddRange(set);


      "replacedyzing {0:n} FIDs".SeeArgs(sets.Count);
      Aver.IsTrue(sets.AsParallel().Distinct().Count() == sets.Count());
      "Done. All ok".See();
    }
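
FID4 hands each task its own FID[] and funnels the arrays through a ConcurrentBag, merging into the List<FID> only after Task.WaitAll, so the list itself never needs locking. A sketch of that aggregation shape, with Guid.NewGuid() standing in for the Azos-specific FID.Generate():

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;

class UniqueIdSketch
{
    static void Main()
    {
        const int CNT = 10000, tCNT = 4;
        var tasks = new List<Task>();
        var bag = new ConcurrentBag<Guid[]>();     // thread-safe hand-off; each task adds exactly one array

        for (var c = 0; c < tCNT; c++)
            tasks.Add(Task.Factory.StartNew(() =>
            {
                var set = new Guid[CNT];
                for (var i = 0; i < CNT; i++)
                    set[i] = Guid.NewGuid();       // stand-in for FID.Generate()
                bag.Add(set);
            }));

        Task.WaitAll(tasks.ToArray());

        var all = bag.SelectMany(s => s).ToList(); // merge on the main thread, after all workers are done
        Console.WriteLine("distinct: {0}", all.Distinct().Count() == all.Count);
    }
}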

19 Source : GlueForm.cs
with MIT License
from azist

private void warmup()
        {
          var lst = new List<Task>();
          for(var ti=0; ti<Environment.ProcessorCount*8; ti++)
           lst.Add( Task.Factory.StartNew(
             () =>
             {
               long sum = 0;
               for(var k=0; k<500000000; k++)
                 sum += k;
              return sum;
             }
             ));

          Task.WaitAll(lst.ToArray());
          GC.Collect(2);
        }
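
warmup() floods the machine with ProcessorCount*8 busy-loop tasks and then forces a full GC, so later timings are not skewed by JIT compilation, thread ramp-up, or a dirty heap. A parameterized sketch of the same idea; the oversubscription factor and spin count here are arbitrary choices, not values the original mandates:

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

static class WarmupSketch
{
    // oversubscription > 1 keeps every core busy even while some tasks finish early
    public static void Warmup(int oversubscription = 8, long spins = 100_000_000)
    {
        var lst = new List<Task>();
        for (var ti = 0; ti < Environment.ProcessorCount * oversubscription; ti++)
            lst.Add(Task.Factory.StartNew(() =>
            {
                long sum = 0;
                for (long k = 0; k < spins; k++)
                    sum += k;                  // pure CPU burn; returning sum keeps the loop from being elided
                return sum;
            }));

        Task.WaitAll(lst.ToArray());
        GC.Collect(2);                         // start the real measurement from a freshly collected heap
    }

    static void Main() => Warmup();
}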

19 Source : PileForm.cs
with MIT License
from azist

private void btnPersonParaGet_Click(object sender, EventArgs e)
      {
        if (lbPerson.SelectedItem==null) return;
        var pp = (PilePointer)lbPerson.SelectedItem;


        var cnt = tbPersonCount.Text.AsInt(10);
        var threads = tbPersonThreads.Text.AsInt(1);

        var tasks = new List<Task>();

        var w = Stopwatch.StartNew();

        for(var c=0;c<threads;c++)
          tasks.Add(Task.Factory.StartNew(()=>
          {
            for(var i=0; i<cnt;i++)
            {
              var obj = m_Pile.Get( pp );
            }
          }, TaskCreationOptions.LongRunning));

        Task.WaitAll( tasks.ToArray());


        var elps = w.ElapsedMilliseconds;
        var total = cnt*threads;
        Text = "Got {0:n0} in {1:n0}ms at {2:n0}/sec".Args(total, elps, total /(elps/1000d));
      }
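
btnPersonParaGet_Click times cnt*threads parallel Get calls with a Stopwatch and reports a per-second rate. A self-contained sketch of that measuring harness, with an array read standing in for m_Pile.Get; the payload, iteration count and thread count are made up:

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading.Tasks;

class ThroughputSketch
{
    static void Main()
    {
        var data = new int[1024];                    // hypothetical shared, read-only payload
        var cnt = 10_000_000;                        // reads per thread, analogous to tbPersonCount
        var threads = 4;                             // analogous to tbPersonThreads

        var tasks = new List<Task>();
        var w = Stopwatch.StartNew();

        for (var c = 0; c < threads; c++)
            tasks.Add(Task.Factory.StartNew(() =>
            {
                long acc = 0;
                for (var i = 0; i < cnt; i++)
                    acc += data[i % data.Length];    // stand-in for the m_Pile.Get(pp) call
            }, TaskCreationOptions.LongRunning));

        Task.WaitAll(tasks.ToArray());

        var secs = Math.Max(w.Elapsed.TotalSeconds, 0.001);   // guard against a zero-millisecond run
        var total = (long)cnt * threads;
        Console.WriteLine("Got {0:n0} in {1:n0}ms at {2:n0}/sec", total, w.ElapsedMilliseconds, total / secs);
    }
}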

19 Source : MySQLDetailedTests.cs
with MIT License
from azist

[Run]
    public async Task ParallelDatastores3ASYNC()
    {
      var pending = new List<Task>();

      for(var i=0; i<500; i++)
        pending.Add(body(i));

      await Task.WhenAll(pending);

      async Task body(int i)
      {
        var row = makePatient("Ivanov" + i.ToString());
        row.SSN = (10000 + i).ToString();
        using (var ds = makeDataStore())
        {
          await ds.InsertAsync(row);

          var qry = new Query("CRUD.Queries.Patient.UpdateAmount")
          {
            new Query.Param("pAmount", 100M + i),
            new Query.Param("pSSN", row.SSN)
          };
          await ds.ExecuteAsync(qry);

          var listQry = new Query<Patient>("CRUD.Queries.Patient.List") { new Query.Param("LN", "Ivanov" + i.ToString()) };
          var result = await ds.LoadDocAsync(listQry);

          Aver.AreEqual(row.Last_Name, result.Last_Name);
          Aver.AreEqual(row.SSN, result.SSN);
          Aver.AreEqual(100M + i, result.Amount);
        }
      };
    }
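
ParallelDatastores3ASYNC is the one asynchronous variant here: instead of blocking with Task.WaitAll, it collects the tasks returned by an async local function and awaits Task.WhenAll. A minimal sketch of that shape, with Task.Delay standing in for the InsertAsync/ExecuteAsync/LoadDocAsync round-trips:

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

class WhenAllSketch
{
    static async Task Main()
    {
        var pending = new List<Task>();

        for (var i = 0; i < 500; i++)
            pending.Add(Body(i));              // the async call starts immediately; its Task is collected, not awaited yet

        await Task.WhenAll(pending);           // asynchronous counterpart of Task.WaitAll; no thread is blocked

        async Task Body(int i)
        {
            await Task.Delay(10);              // stand-in for the database round-trips in the example above
            Console.WriteLine("item {0} done", i);
        }
    }
}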

19 Source : CacheFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    [Run("speed=false  durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    public void Chessboard_ByteArray(bool speed, int durationSec, int payloadSizeMin, int payloadSizeMax, bool isParallel)
    {
      using (var cache = new LocalCache(NOPApplication.Instance))
      using (var pile = new DefaultPile(cache))
      {
        cache.Pile = pile;
        cache.PileAllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        cache.Start();

        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckByteArray>();
              var i = 0;
              var tA = cache.GetOrCreateTable<GDID>("A");
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                var val = new byte[payloadSize];
                val[0] = (byte)Ambient.Random.NextRandomInteger;
                val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                var key = new GDID((uint)Thread.CurrentThread.ManagedThreadId, (ulong)i);
                tA.Put(key, val);

                var element = new CheckByteArray(key, payloadSize - 1, val[0], val[payloadSize - 1]);
                list.Add(element);

                // delete previous element
                if (list.Count > 1 && i % 2 == 0)
                {
                  key = list[list.Count - 2].Key;
                  tA.Remove(key);
                  list.RemoveAt(list.Count - 2);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var k = 0; k < toRead && list.Count > 0; k++)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    element = list[idx];
                    var buf = tA.Get(element.Key) as byte[];
                    if (buf == null)
                    {
                      list.RemoveAt(idx);
                      continue;
                    }
                    Aver.AreEqual(element.FirstByte, buf[0]);
                    Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
                  }
                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckByteArray>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, tableCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, tA.Count));
              foreach (var element in list)
              {
                var buf = tA.Get(element.Key) as byte[];
                if (buf == null)
                  continue;
                Aver.AreEqual(element.FirstByte, buf[0]);
                Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    [Run("speed=false  durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    public static void Chessboard_ByteArray(bool speed, int durationSec, int payloadSizeMin, int payloadSizeMax, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckByteArray>();
              var i = 0;
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                var val = new byte[payloadSize];
                val[0] = (byte)Ambient.Random.NextRandomInteger;
                val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                var ptr = pile.Put(val);

                var element = new CheckByteArray(ptr, payloadSize - 1, val[0], val[payloadSize - 1]);
                list.Add(element);

                // delete previous element
                if (list.Count > 1 && i % 2 == 0)
                {
                  ptr = list[list.Count - 2].Ptr;
                  pile.Delete(ptr);
                  list.RemoveAt(list.Count - 2);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}, Pile objects {3}, Pile segments {4} Pile Bytes {5}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count, pile.ObjectCount, pile.SegmentCount, pile.AllocatedMemoryBytes));
                  for (var k = 0; k < toRead; k++)
                  {
                    element = list[Ambient.Random.NextScaledRandomInteger(0, list.Count - 1)];
                    var buf = pile.Get(element.Ptr) as byte[];
                    Aver.AreEqual(element.FirstByte, buf[0]);
                    Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
                  }
                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckByteArray>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, ObjectCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, pile.ObjectCount));
              foreach (var element in list)
              {
                var buf = pile.Get(element.Ptr) as byte[];
                Aver.AreEqual(element.FirstByte, buf[0]);
                Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }
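
The "chessboard" tests interleave a put of a new element, deletion of the previous one on every other iteration, and occasional read-back verification, each task keeping its own bookkeeping list while sharing the pile. A framework-free sketch of that interleaving, with a ConcurrentDictionary as a hypothetical stand-in for the shared pile:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading.Tasks;

class ChessboardSketch
{
    static void Main()
    {
        var store = new ConcurrentDictionary<long, byte[]>();   // hypothetical stand-in for the shared pile
        var tasks = new List<Task>();

        for (var t = 0; t < Math.Max(1, Environment.ProcessorCount - 1); t++)
        {
            var worker = t;
            tasks.Add(Task.Factory.StartNew(() =>
            {
                var rnd = new Random(worker);
                var keys = new List<long>();                    // per-task bookkeeping, like the local CheckByteArray list
                for (var i = 0; i < 100_000; i++)
                {
                    var key = ((long)worker << 32) | (uint)i;   // key space partitioned per worker, like GDID(threadId, i)
                    var val = new byte[rnd.Next(2, 1000)];
                    val[0] = (byte)key;
                    val[val.Length - 1] = (byte)(key >> 8);
                    store[key] = val;
                    keys.Add(key);

                    // "chessboard": delete the previous element on every even iteration
                    if (keys.Count > 1 && i % 2 == 0)
                    {
                        store.TryRemove(keys[keys.Count - 2], out _);
                        keys.RemoveAt(keys.Count - 2);
                    }
                }

                // final verification of whatever survived
                foreach (var key in keys)
                    if (store.TryGetValue(key, out var buf) &&
                        (buf[0] != (byte)key || buf[buf.Length - 1] != (byte)(key >> 8)))
                        throw new Exception("corrupt entry for key " + key);
            }, TaskCreationOptions.LongRunning));
        }

        Task.WaitAll(tasks.ToArray());
        Console.WriteLine("survivors: {0:n0}", store.Count);
    }
}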

19 Source : PileFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  isParallel=true")]
    [Run("speed=false  durationSec=30  isParallel=true")]
    public static void Chessboard_TRow(bool speed, int durationSec, bool isParallel)
    {
      using (var pile = new DefaultPile(NOPApplication.Instance))
      {
        pile.AllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        pile.Start();
        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckTRow>();
              var i = 0;
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var val = PersonRow.MakeFake(new GDID(0, (ulong)i));

                var ptr = pile.Put(val);

                var element = new CheckTRow(ptr, val.ID, val.Address1);
                list.Add(element);

                // delete previous element
                if (list.Count > 1 && i % 2 == 0)
                {
                  ptr = list[list.Count - 2].Ptr;
                  pile.Delete(ptr);
                  list.RemoveAt(list.Count - 2);
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}, Pile objects {3}, Pile segments {4} Pile Bytes {5}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count, pile.ObjectCount, pile.SegmentCount, pile.AllocatedMemoryBytes));
                  for (var k = 0; k < toRead; k++)
                  {
                    element = list[Ambient.Random.NextScaledRandomInteger(0, list.Count - 1)];
                    var buf = pile.Get(element.Ptr) as PersonRow;
                    Aver.IsTrue(element.Id.Equals(buf.ID));
                    Aver.IsTrue(element.Address.Equals(buf.Address1));
                  }
                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckTRow>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, objectCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, pile.ObjectCount));
              foreach (var element in list)
              {
                var buf = pile.Get(element.Ptr) as PersonRow;
                Aver.IsTrue(element.Id.Equals(buf.ID));
                Aver.IsTrue(element.Address.Equals(buf.Address1));
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : CacheFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  deleteFreq=3  isParallel=true")]
    [Run("speed=false  durationSec=30  payloadSizeMin=2  payloadSizeMax=1000  deleteFreq=3  isParallel=true")]
    public void DeleteOne_ByteArray(bool speed, int durationSec, int payloadSizeMin, int payloadSizeMax, int deleteFreq, bool isParallel)
    {
      using (var cache = new LocalCache(NOPApplication.Instance))
      using (var pile = new DefaultPile(cache))
      {
        cache.Pile = pile;
        cache.PileAllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        cache.Start();

        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var i = 0;
              var list = new List<CheckByteArray>();
              var tA = cache.GetOrCreateTable<GDID>("A");
              var wlc = 0;
              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                var val = new byte[payloadSize];
                val[0] = (byte)Ambient.Random.NextRandomInteger;
                val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;

                var key = new GDID((uint)Thread.CurrentThread.ManagedThreadId, (ulong)i);
                tA.Put(key, val);

                list.Add(new CheckByteArray(key, payloadSize - 1, val[0], val[payloadSize - 1]));

                // delete ONE random element
                if (i > 0 && i % deleteFreq == 0)
                {
                  while (list.Count > 0)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    key = list[idx].Key;
                    var removed = tA.Remove(key);
                    list.RemoveAt(idx);
                    if (removed)
                      break;
                  }
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var k = 0; k < toRead && list.Count > 0; k++)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    var element = list[idx];
                    var buf = tA.Get(element.Key) as byte[];
                    if (buf == null)
                    {
                      list.RemoveAt(idx);
                      continue;
                    }
                    Aver.AreEqual(element.FirstByte, buf[0]);
                    Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
                  }

                }

                if (i == Int32.MaxValue)
                  i = 0;
                else
                  i++;

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckByteArray>();
              }

              Console.WriteLine("Thread {0} is doing final read of {1} elements, tableCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, tA.Count));
              foreach (var element in list)
              {
                var buf = tA.Get(element.Key) as byte[];
                if (buf == null)
                  continue;
                Aver.AreEqual(element.FirstByte, buf[0]);
                Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
              }
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : CacheFragmentationTest.cs
with MIT License
from azist

[Run("speed=true   durationSec=30  putMin=100  putMax=200  delFactor=4  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    [Run("speed=false  durationSec=30  putMin=100  putMax=200  delFactor=4  payloadSizeMin=2  payloadSizeMax=1000  isParallel=true")]
    public void DeleteSeveral_ByteArray(bool speed, int durationSec, int putMin, int putMax, int delFactor, int payloadSizeMin, int payloadSizeMax, bool isParallel)
    {
      using (var cache = new LocalCache(NOPApplication.Instance))
      using (var pile = new DefaultPile(cache))
      {
        cache.Pile = pile;
        cache.PileAllocMode = speed ? AllocationMode.FavorSpeed : AllocationMode.ReuseSpace;
        cache.Start();

        var startTime = DateTime.UtcNow;
        var tasks = new List<Task>();
        for (var t = 0; t < (isParallel ? (System.Environment.ProcessorCount - 1) : 1); t++)
          tasks.Add(Task.Factory.StartNew(() =>
            {
              var list = new List<CheckByteArray>();
              var tA = cache.GetOrCreateTable<GDID>("A");
              ulong k = 0;
              var wlc = 0;

              while (true)
              {
                if ((DateTime.UtcNow - startTime).TotalSeconds >= durationSec) break;

                var putCount = Ambient.Random.NextScaledRandomInteger(putMin, putMax);
                for (int i = 0; i < putCount; i++)
                {
                  var payloadSize = Ambient.Random.NextScaledRandomInteger(payloadSizeMin, payloadSizeMax);
                  var val = new byte[payloadSize];
                  val[0] = (byte)Ambient.Random.NextRandomInteger;
                  val[payloadSize - 1] = (byte)Ambient.Random.NextRandomInteger;
                  var key = new GDID((uint)Thread.CurrentThread.ManagedThreadId, k);

                  tA.Put(key, val);

                  list.Add(new CheckByteArray(key, payloadSize - 1, val[0], val[payloadSize - 1]));
                  k++;
                }

                int delCount = putCount / delFactor;
                for (int i = 0; i < delCount; i++)
                {
                  while (list.Count > 0)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    var key = list[idx].Key;
                    var removed = tA.Remove(key);
                    list.RemoveAt(idx);
                    if (removed)
                      break;
                  }
                }

                // get several random elements
                if (list.Count > 64 && Ambient.Random.NextScaledRandomInteger(0, 100) > 98)
                {
                  var toRead = Ambient.Random.NextScaledRandomInteger(8, 64);
                  wlc++;
                  if (wlc % 125 == 0)
                    Console.WriteLine("Thread {0} is reading {1} elements, total {2}"
                      .Args(Thread.CurrentThread.ManagedThreadId, toRead, list.Count));
                  for (var j = 0; j < toRead && list.Count > 0; j++)
                  {
                    var idx = Ambient.Random.NextScaledRandomInteger(0, list.Count - 1);
                    var element = list[idx];
                    var buf = tA.Get(element.Key) as byte[];
                    if (buf == null)
                    {
                      list.RemoveAt(idx);
                      continue;
                    }
                    Aver.AreEqual(element.FirstByte, buf[0]);
                    Aver.AreEqual(element.LastByte, buf[element.IdxLast]);
                  }
                }

                if (list.Count == Int32.MaxValue)
                  list = new List<CheckByteArray>();
              }

              // total check
              Console.WriteLine("Thread {0} is doing final read of {1} elements, tableCount {2}"
                .Args(Thread.CurrentThread.ManagedThreadId, list.Count, tA.Count));
              foreach (var element in list)
              {
                var val = tA.Get(element.Key) as byte[];
                if (val == null)
                  continue;
                Aver.AreEqual(element.FirstByte, val[0]);
                Aver.AreEqual(element.LastByte, val[element.IdxLast]);
              }
              return;
            }, TaskCreationOptions.LongRunning));
        Task.WaitAll(tasks.ToArray());
      }
    }

19 Source : ARowBenchmarkingParallel.cs
with MIT License
from azist

[Run("cnt=250000 tcnt=12")]
      [Run("cnt=250000 tcnt=10")]
      [Run("cnt=250000 tcnt=8")]
      [Run("cnt=250000 tcnt=4")]
      public void Deserialize_SimplePerson_Slim(int CNT, int tcnt)
      {
        var row = getSimplePerson();

        var tasks = new List<Task>();
        var sw = Stopwatch.StartNew();
        for(var t=0; t<tcnt; t++)  // one deserialization worker per tcnt, so that "total" below really is CNT * tcnt
          tasks.Add( Task.Factory.StartNew( ()=>
          {
              var slim = new Azos.Serialization.Slim.SlimSerializer( Azos.Serialization.Slim.TypeRegistry.BoxedCommonNullableTypes,
                                                                    Azos.Serialization.Slim.TypeRegistry.BoxedCommonTypes,
                                                                    new []{ typeof(SimplePersonRow) });

              slim.TypeMode = Azos.Serialization.Slim.TypeRegistryMode.Batch;//give slim all possible preferences

              using(var ms = new MemoryStream())
              {
                slim.Serialize(ms, row);//warmup
                for(var i=0; i<CNT; i++)
                {
                  ms.Position = 0;
                  var row2 = slim.Deserialize(ms) as SimplePersonRow;
                  Aver.AreEqual(row.ID, row2.ID);
                }
              }
          }));

        Task.WaitAll(tasks.ToArray());

        var el = sw.ElapsedMilliseconds;
        var total = CNT * tcnt;
        Console.WriteLine("Slim did {0:n0} in {1:n0} ms at {2:n0} ops/sec".Args( total, el, total / (el/1000d)));
      }
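
The benchmark above constructs a separate SlimSerializer and MemoryStream inside each task rather than sharing one instance, since those objects are not assumed thread-safe. A framework-free sketch of that one-private-resource-per-worker shape, using BinaryWriter/BinaryReader round-trips as made-up stand-ins for serialization:

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Threading.Tasks;

class PerWorkerResourceSketch
{
    static void Main()
    {
        const int CNT = 200_000, tcnt = 4;
        var tasks = new List<Task>();
        var sw = Stopwatch.StartNew();

        for (var t = 0; t < tcnt; t++)
            tasks.Add(Task.Factory.StartNew(() =>
            {
                // each worker owns its stream, writer and reader; nothing non-thread-safe is shared
                using (var ms = new MemoryStream())
                {
                    var writer = new BinaryWriter(ms);
                    var reader = new BinaryReader(ms);

                    writer.Write(123456789L);        // write the payload once up front, like the warmup Serialize call
                    writer.Write("John Smith");
                    writer.Flush();

                    for (var i = 0; i < CNT; i++)
                    {
                        ms.Position = 0;             // rewind and read it back repeatedly
                        var id = reader.ReadInt64();
                        var name = reader.ReadString();
                        if (id != 123456789L || name != "John Smith")
                            throw new Exception("round-trip mismatch");
                    }
                }
            }));

        Task.WaitAll(tasks.ToArray());

        var el = Math.Max(sw.ElapsedMilliseconds, 1L);
        var total = (long)CNT * tcnt;
        Console.WriteLine("did {0:n0} in {1:n0} ms at {2:n0} ops/sec", total, el, total / (el / 1000d));
    }
}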
