{"payload":{"feedbackUrl":"https://github.com/orgs/community/discussions/53140","repo":{"id":45717250,"defaultBranch":"master","name":"tensorflow","ownerLogin":"tensorflow","currentUserCanPush":false,"isFork":false,"isEmpty":false,"createdAt":"2015-11-07T01:19:20.000Z","ownerAvatar":"https://avatars.githubusercontent.com/u/15658638?v=4","public":true,"private":false,"isOrgOwned":true},"refInfo":{"name":"","listCacheKey":"v0:1719356334.0","currentOid":""},"activityList":{"items":[{"before":"e1d610bef837aa6e178db4b18fb1299391cdab6f","after":"bb023bb59b0f0f6b130f771809d6e38c81060186","ref":"refs/heads/exported_pr_646506658","pushedAt":"2024-06-25T23:02:02.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Move BufferAllocations implementation to header file\n\nResolving buffer slice device memory is on a critical path of every thunk. Move implementation to header and force inlining to improve performance of ultra small kernels.\n\nPiperOrigin-RevId: 646506658","shortMessageHtmlLink":"[xla:cpu] Move BufferAllocations implementation to header file"}},{"before":"0534bbf283b6cdab41261feeb6eacbf3946c0e55","after":"b3b9a201e26feeb56ade42a919ce1329b2d36f37","ref":"refs/heads/exported_pr_646316826","pushedAt":"2024-06-25T23:01:58.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] NFC: Micro-optimizations for KernelThunk\n\nPiperOrigin-RevId: 646316826","shortMessageHtmlLink":"[xla:cpu] NFC: Micro-optimizations for KernelThunk"}},{"before":"41cf863abd5e7942f2f165de7b216627a4dcb946","after":"61df87fddd4bc2dcc0f49c83a3a4e63051d78a23","ref":"refs/heads/exported_pr_646311089","pushedAt":"2024-06-25T23:01:24.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Optimize KernelThunk by passing SE_HOST_KernelArg directly to the kernel\n\nPiperOrigin-RevId: 646311089","shortMessageHtmlLink":"[xla:cpu] Optimize KernelThunk by passing SE_HOST_KernelArg directly …"}},{"before":"dea0e2a6f8beca190ca138093e669d42ee244056","after":null,"ref":"refs/heads/exported_pr_646307526","pushedAt":"2024-06-25T22:58:54.000Z","pushType":"branch_deletion","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"}},{"before":"2d72742d40f1d3121c895f8584ec8882d1e97fc8","after":"dea0e2a6f8beca190ca138093e669d42ee244056","ref":"refs/heads/master","pushedAt":"2024-06-25T22:58:52.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Optimize KernelThunk host kernel loading\n\nPiperOrigin-RevId: 646631043","shortMessageHtmlLink":"[xla:cpu] Optimize KernelThunk host kernel loading"}},{"before":"d9070be67176cc33afc273d1a7cf1536b56f6dc3","after":"dea0e2a6f8beca190ca138093e669d42ee244056","ref":"refs/heads/exported_pr_646307526","pushedAt":"2024-06-25T22:58:51.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Optimize KernelThunk host kernel loading\n\nPiperOrigin-RevId: 646631043","shortMessageHtmlLink":"[xla:cpu] Optimize KernelThunk host kernel loading"}},{"before":"11de673c76d93eb25e253ebedeab5641d2d1505c","after":"1a097f01d21019dad2b4c1649e87a82bfd570f8f","ref":"refs/heads/exported_pr_646546143","pushedAt":"2024-06-25T22:52:31.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Some internal changes.\n\nPiperOrigin-RevId: 646546143","shortMessageHtmlLink":"Some internal changes."}},{"before":null,"after":"5635752cb355b34cc6ba75aef0a553b6bfac0daa","ref":"refs/heads/exported_pr_642775989","pushedAt":"2024-06-25T22:51:18.000Z","pushType":"branch_creation","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Support Cuda Graph in XProf\n\nPiperOrigin-RevId: 642775989","shortMessageHtmlLink":"Support Cuda Graph in XProf"}},{"before":"2d72742d40f1d3121c895f8584ec8882d1e97fc8","after":null,"ref":"refs/heads/exported_pr_645085937","pushedAt":"2024-06-25T22:51:05.000Z","pushType":"branch_deletion","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"}},{"before":"f47f49c708571bb9eae5f2464f2fd0f2ef2ee9f8","after":"2d72742d40f1d3121c895f8584ec8882d1e97fc8","ref":"refs/heads/master","pushedAt":"2024-06-25T22:51:04.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Add tensorflow support for 16k page sizes on arm64\n\nTested both libtensorflowlite.so and libtensorflowlite_jni.so to ensure both\nlibraries are 16k ELF aligned with this change:\n\n$ objdump -p bazel-bin/tensorflow/lite/libtensorflowlite.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\n$ objdump -p bazel-bin/tensorflow/lite/java/libtensorflowlite_jni.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\nPiperOrigin-RevId: 646629366","shortMessageHtmlLink":"Add tensorflow support for 16k page sizes on arm64"}},{"before":"e722583224df0fcbaa9c159ec2d79af7c107b734","after":"2d72742d40f1d3121c895f8584ec8882d1e97fc8","ref":"refs/heads/exported_pr_645085937","pushedAt":"2024-06-25T22:51:02.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Add tensorflow support for 16k page sizes on arm64\n\nTested both libtensorflowlite.so and libtensorflowlite_jni.so to ensure both\nlibraries are 16k ELF aligned with this change:\n\n$ objdump -p bazel-bin/tensorflow/lite/libtensorflowlite.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\n$ objdump -p bazel-bin/tensorflow/lite/java/libtensorflowlite_jni.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\nPiperOrigin-RevId: 646629366","shortMessageHtmlLink":"Add tensorflow support for 16k page sizes on arm64"}},{"before":"c990d0d9a724738251a2a28a75ad0ecba6bdef55","after":"c76f53338aa644126095b617b62ae755b4232e88","ref":"refs/heads/exported_pr_616865795","pushedAt":"2024-06-25T22:42:41.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Introduce hermetic CUDA in Google ML projects.\n\nInstead of having pre-installed NVIDIA CUDA and CUDNN libraries and setting environment variables pointing to the installation locations, Bazel should automatically download CUDA and CUDNN distributives in the cache and use them during build and test phases.\n\nPiperOrigin-RevId: 616865795","shortMessageHtmlLink":"Introduce hermetic CUDA in Google ML projects."}},{"before":"acd661f1d8a2f87e7b1618587372bbf0eb1df822","after":"e17d963ea1f64ca49f9574fb90dae12c46a1494b","ref":"refs/heads/exported_pr_646484108","pushedAt":"2024-06-25T22:42:39.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"PR #13952: [XLA:GPU][AutoSharding] Enable autosharding in GPU compiler compilation\n\nImported from GitHub PR https://github.com/openxla/xla/pull/13952\n\n## Objective:\nThis pull request aims to resolve compilation issues related to enabling GPU Auto Sharding functionality in the `auto_sharding_gpu_compiler_test.cc` file. fix #13612\n\n## Background\nWhen compiling the `auto_sharding_gpu_compiler_test.cc` using the command:\n\n```shell\nTF_CUDA_COMPUTE_CAPABILITIES=\"8.0\" XLA_CUDA=1 bazel build xla/service/gpu:auto_sharding_gpu_compiler_test --config=cuda\n```\nWhile the compilation is successful, the unit test subsequently fails. Upon investigation, the failure seems to be linked to the following warning, which suggests that auto sharding is not enabled.\n\n```shell\n2024-06-19 17:44:19.985341: E xla/service/gpu/gpu_compiler.cc:552] GPU autosharding is not yet available in open source.\n```\n## Changes Made\n\n1. To enable GPU Auto Sharding, I removed several conditional macros in `gpu_compiler.cc`. However, this modification led to a compilation error:\n ```shell\n xla/xla/service/gpu/BUILD:3402:11: Compiling xla/service/gpu/gpu_compiler.cc failed: (Exit 1): crosstool_wrapper_driver_is_not_gcc failed: error executing command (from target //xla/service/gpu:gpu_compiler) external/local_config_cuda/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc -MD -MF bazel-out/k8-opt/bin/xla/service/gpu/_objs/gpu_compiler/gpu_compiler.pic.d ... (remaining 519 arguments skipped)\n In file included from ./xla/hlo/experimental/auto_sharding/auto_sharding.h:37,\n from xla/service/gpu/gpu_compiler.cc:253:\n ./xla/hlo/experimental/auto_sharding/auto_sharding_solver.h:25:10: fatal error: xla/hlo/experimental/auto_sharding/auto_sharding.pb.h: No such file or directory\n 25 | #include \"xla/hlo/experimental/auto_sharding/auto_sharding.pb.h\"\n | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n compilation terminated.\n ```\n\n2. Upon reviewing the dependencies listed in the `BUILD` file for the `gpu_compiler` module, I discovered that the auto sharding module was not included. So I added the missing auto sharding module to the dependencies of the GPU compiler in the `BUILD` file.\n\n## Results\n\nPost-modification, the unit test now passes without errors, confirming that the changes fix the related issues.\nCopybara import of the project:\n\n--\n71be6a3588a1d025f6ca75d9c1f53f78fb1645ff by luzhan :\n\n[XLA:GPU][AutoSharding] Enable autosharding in GPU compiler compilation\n\n--\n235bb7d9c211227cab3e32494c1d547bf8d1c3f2 by luzhan :\n\nfix: add ortools patch to enable compiling of torch/xla and tensorflow\n\nMerging this change closes #13952\n\nFUTURE_COPYBARA_INTEGRATE_REVIEW=https://github.com/openxla/xla/pull/13952 from lausannel:main 235bb7d9c211227cab3e32494c1d547bf8d1c3f2\nPiperOrigin-RevId: 646484108","shortMessageHtmlLink":"PR #13952: [XLA:GPU][AutoSharding] Enable autosharding in GPU compile…"}},{"before":"3c3811e7605c6d7e9e2bde7dd48930bafe0f1fa4","after":"fb853d64f64805e7371fb0dab7197d7606def565","ref":"refs/heads/exported_pr_638337000","pushedAt":"2024-06-25T22:42:30.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Add additional overloaded versions of the BufferFromHostLiteral function in PjRtClient which take a device_layout parameter.\n\nPiperOrigin-RevId: 638337000","shortMessageHtmlLink":"Add additional overloaded versions of the BufferFromHostLiteral funct…"}},{"before":"4b518d37677dd4ea3e4352a5f48e0e8dbea2e514","after":"d2ade4efe6cbc742c40eba23b30e9c18ff77db7f","ref":"refs/heads/exported_pr_646254898","pushedAt":"2024-06-25T22:32:14.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Add `xla/package_groups.bzl` and `xla/tsl/package_groups.bzl` to hold `package_groups` and replace Copybara rules\n\nPiperOrigin-RevId: 646254898","shortMessageHtmlLink":"Add xla/package_groups.bzl and xla/tsl/package_groups.bzl to hold…"}},{"before":"f47f49c708571bb9eae5f2464f2fd0f2ef2ee9f8","after":null,"ref":"refs/heads/exported_pr_646594798","pushedAt":"2024-06-25T22:31:17.000Z","pushType":"branch_deletion","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"}},{"before":"038957a5b6cde41376a96fcc42eaab617881a4d3","after":"f47f49c708571bb9eae5f2464f2fd0f2ef2ee9f8","ref":"refs/heads/master","pushedAt":"2024-06-25T22:31:15.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Properly override repository for JAX builds in `build.py`\n\nAlso change an `if_static` to an `if_google` to fix JAX builds\n\nPiperOrigin-RevId: 646622609","shortMessageHtmlLink":"Properly override repository for JAX builds in build.py"}},{"before":"314164ecb376216cc4b8ebb78f1e675c13ecb5ee","after":"f47f49c708571bb9eae5f2464f2fd0f2ef2ee9f8","ref":"refs/heads/exported_pr_646594798","pushedAt":"2024-06-25T22:31:14.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Properly override repository for JAX builds in `build.py`\n\nAlso change an `if_static` to an `if_google` to fix JAX builds\n\nPiperOrigin-RevId: 646622609","shortMessageHtmlLink":"Properly override repository for JAX builds in build.py"}},{"before":"b7c0e234589c398dfd05be41815272fd28b7d155","after":"a2680c5e447c2d4b72e49ee9c14a664272dc24ff","ref":"refs/heads/exported_pr_646528556","pushedAt":"2024-06-25T22:29:52.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Remove unused VlogOccupancyInfo calls.\n\nPiperOrigin-RevId: 646528556","shortMessageHtmlLink":"Remove unused VlogOccupancyInfo calls."}},{"before":"a91a415ded78b2ae8281abdebdf9b20b2e13a137","after":"48ade3d1d9d6d7c134a5f030b1a886dc8727d37c","ref":"refs/heads/exported_pr_646461734","pushedAt":"2024-06-25T22:13:53.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"PR #13813: [NVIDIA GPU] Assign a fixed index for cached activation\n\nImported from GitHub PR https://github.com/openxla/xla/pull/13813\n\ngpu_windowed_einsum_handler pass has been re-using the empty buffer of the transformed while loop. This buffer is given by the spmd dot_handler pass. The shape of the buffer has changed from the allgathered shape of the sharded operand to the output shape of the dot which leads to a shape incompatibility error. To make the gpu handler completely safe, we will make a new element in the tuple to host the cached activation with the desired shape.\nThe slice index of where to write the slice into the full buffer also changes based on whether it's contracting or non-contracting dim is sharded. With the new element, we will need to determine the slice index ourselves in the handler pass.\nCopybara import of the project:\n\n--\nceeff8e5da8ecb3f382bbd8dee83e2f0c909b22d by TJ Xu :\n\nAssign a fixed index for cached activation\nCache correct activation slice when contracting dim is sharded\n\n--\n233763b8efb4ab0045eb998b437c7b28c8f776c8 by TJ Xu :\n\nSimplified logic in gpu einsum handler to be more generic\n\n--\n2220cd1a022ad519cd23ab36c31c70c9627fc76d by TJ Xu :\n\nremove un-used variables\n\nMerging this change closes #13813\n\nFUTURE_COPYBARA_INTEGRATE_REVIEW=https://github.com/openxla/xla/pull/13813 from Tixxx:tixxx/ag_multi_fix 2220cd1a022ad519cd23ab36c31c70c9627fc76d\nPiperOrigin-RevId: 646461734","shortMessageHtmlLink":"PR #13813: [NVIDIA GPU] Assign a fixed index for cached activation"}},{"before":"f3bb75871dab09c0a68cd53060d71ce83bcdc776","after":"e722583224df0fcbaa9c159ec2d79af7c107b734","ref":"refs/heads/exported_pr_645085937","pushedAt":"2024-06-25T22:09:13.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Add tensorflow support for 16k page sizes on arm64\n\nTested both libtensorflowlite.so and libtensorflowlite_jni.so to ensure both\nlibraries are 16k ELF aligned with this change:\n\n$ objdump -p bazel-bin/tensorflow/lite/libtensorflowlite.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\n$ objdump -p bazel-bin/tensorflow/lite/java/libtensorflowlite_jni.so | grep LOAD | awk '{ print $1 \" \" $NF }'\nLOAD 2**14\nLOAD 2**14\n\nPiperOrigin-RevId: 645085937","shortMessageHtmlLink":"Add tensorflow support for 16k page sizes on arm64"}},{"before":"79b36c736a2761584c577dea06b68368ed0632b0","after":"f9d3e7b5e3beba7aa96e2ef77b5dc48b4337a61c","ref":"refs/heads/exported_pr_644267887","pushedAt":"2024-06-25T22:06:03.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Change AllReduceSimplifier to handle trivial cross-partition all-reduces.\n\nThis CL ensures that AllReduceSimplifier can simplify trivial all-reduces (an all-reduce where each subgroup is formed of a single participant) that are not necessarily cross replica (for example a cross partition all-reduce). We only simplify non cross replica all-reduce when the module is SPMD.\n\nPiperOrigin-RevId: 644267887","shortMessageHtmlLink":"Change AllReduceSimplifier to handle trivial cross-partition all-redu…"}},{"before":"ac75d3f4f75c9084ba5ce69f7e400e45dd91e18a","after":"314164ecb376216cc4b8ebb78f1e675c13ecb5ee","ref":"refs/heads/exported_pr_646594798","pushedAt":"2024-06-25T22:05:41.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Properly override repository for JAX builds in `build.py`\n\nAlso change an `if_static` to an `if_google` to fix JAX builds\n\nPiperOrigin-RevId: 646594798","shortMessageHtmlLink":"Properly override repository for JAX builds in build.py"}},{"before":"e8add3d25235a9752a2c2195b3a3b702ae4ab39b","after":"a8b3750c8e35155e053cbdca4a08fb58b8523f9e","ref":"refs/heads/exported_pr_644309633","pushedAt":"2024-06-25T22:04:30.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[Triton] Refactoring condition in autotuner to be more robust. Added test to make sure crashing Triton configurations are actually skipped and to guard against breaking it.\n\nPiperOrigin-RevId: 644309633","shortMessageHtmlLink":"[Triton] Refactoring condition in autotuner to be more robust. Added …"}},{"before":"2f154c38b4e4b597ff8e7d931607d6cbb258a425","after":"da54c11edb9ecd28f4f05409f5c4e986fce79616","ref":"refs/heads/exported_pr_644497618","pushedAt":"2024-06-25T22:00:24.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"move mlir2exec test to tensorflow lite\n\nPiperOrigin-RevId: 644497618","shortMessageHtmlLink":"move mlir2exec test to tensorflow lite"}},{"before":"038957a5b6cde41376a96fcc42eaab617881a4d3","after":null,"ref":"refs/heads/exported_pr_646583702","pushedAt":"2024-06-25T21:59:33.000Z","pushType":"branch_deletion","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"}},{"before":"59e731c177ca597e0429d070ac929361de6c1603","after":"038957a5b6cde41376a96fcc42eaab617881a4d3","ref":"refs/heads/master","pushedAt":"2024-06-25T21:59:32.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Remove stay type annotation from context().\n\nPiperOrigin-RevId: 646609853","shortMessageHtmlLink":"Remove stay type annotation from context()."}},{"before":"4e3066ecd3ef508d4ca657fd21bf29b5ed3050e3","after":"038957a5b6cde41376a96fcc42eaab617881a4d3","ref":"refs/heads/exported_pr_646583702","pushedAt":"2024-06-25T21:59:30.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"Remove stay type annotation from context().\n\nPiperOrigin-RevId: 646609853","shortMessageHtmlLink":"Remove stay type annotation from context()."}},{"before":null,"after":"462bea3b664836a341c40f81f613315d5e5ad218","ref":"refs/heads/exported_pr_646559765","pushedAt":"2024-06-25T21:53:45.000Z","pushType":"branch_creation","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Add Resource and ResourceUse for tracking run time dependencies coming from `resources`\n\nPiperOrigin-RevId: 646559765","shortMessageHtmlLink":"[xla:cpu] Add Resource and ResourceUse for tracking run time dependen…"}},{"before":"6de9ae6c234987d15faac8795146ad5baf19b808","after":"e1d610bef837aa6e178db4b18fb1299391cdab6f","ref":"refs/heads/exported_pr_646506658","pushedAt":"2024-06-25T21:47:54.000Z","pushType":"force_push","commitsCount":0,"pusher":{"login":"copybara-service[bot]","name":null,"path":"/apps/copybara-service","primaryAvatarUrl":"https://avatars.githubusercontent.com/in/44061?s=80&v=4"},"commit":{"message":"[xla:cpu] Move BufferAllocations implementation to header file\n\nResolving buffer slice device memory is on a critical path of every thunk. Move implementation to header and force inlining to improve performance of ultra small kernels.\n\nPiperOrigin-RevId: 646506658","shortMessageHtmlLink":"[xla:cpu] Move BufferAllocations implementation to header file"}}],"hasNextPage":true,"hasPreviousPage":false,"activityType":"all","actor":null,"timePeriod":"all","sort":"DESC","perPage":30,"cursor":"djE6ks8AAAAEbyM7wgA","startCursor":null,"endCursor":null}},"title":"Activity · tensorflow/tensorflow"}