all messages for Guix-related lists mirrored at yhetil.org
 help / color / mirror / code / Atom feed
From: David Pflug <david@pflug.io>
To: 68455@debbugs.gnu.org
Cc: David Pflug <david@pflug.io>
Subject: [bug#68455] [PATCH v2] gnu: llama-cpp: Update to 1873.
Date: Fri, 26 Jan 2024 07:20:21 -0500	[thread overview]
Message-ID: <20240126122110.10991-1-david@pflug.io> (raw)
In-Reply-To: <20240114203255.26500-1-david@pflug.io>

* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.

python-gguf was added by bug #68735.

Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
 gnu/packages/machine-learning.scm | 110 +++++++++++++++---------------
 1 file changed, 55 insertions(+), 55 deletions(-)

diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 0e88f7265b..1d590d1c1b 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -519,63 +519,63 @@ (define-public guile-aiscm-next
   (deprecated-package "guile-aiscm-next" guile-aiscm))
 
 (define-public llama-cpp
-  (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
-        (revision "0"))
-    (package
-      (name "llama-cpp")
-      (version (git-version "0.0.0" revision commit))
-      (source
-       (origin
-         (method git-fetch)
-         (uri (git-reference
-               (url "https://github.com/ggerganov/llama.cpp")
-               (commit (string-append "master-" (string-take commit 7)))))
-         (file-name (git-file-name name version))
-         (sha256
-          (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
-      (build-system cmake-build-system)
-      (arguments
-       (list
-        #:modules '((ice-9 textual-ports)
-                    (guix build utils)
-                    ((guix build python-build-system) #:prefix python:)
-                    (guix build cmake-build-system))
-        #:imported-modules `(,@%cmake-build-system-modules
-                             (guix build python-build-system))
-        #:phases
-        #~(modify-phases %standard-phases
-            (add-before 'install 'install-python-scripts
-              (lambda _
-                (let ((bin (string-append #$output "/bin/")))
-                  (define (make-script script)
-                    (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
-                      (call-with-input-file
-                          (string-append "../source/" script suffix)
-                        (lambda (input)
-                          (call-with-output-file (string-append bin script)
-                            (lambda (output)
-                              (format output "#!~a/bin/python3\n~a"
-                                      #$(this-package-input "python")
-                                      (get-string-all input))))))
-                      (chmod (string-append bin script) #o555)))
-                  (mkdir-p bin)
-                  (make-script "convert-pth-to-ggml")
-                  (make-script "convert-lora-to-ggml")
-                  (make-script "convert"))))
-            (add-after 'install-python-scripts 'wrap-python-scripts
-              (assoc-ref python:%standard-phases 'wrap))
-            (replace 'install
-              (lambda _
-                (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
-      (inputs (list python))
-      (propagated-inputs
-       (list python-numpy python-pytorch python-sentencepiece))
-      (home-page "https://github.com/ggerganov/llama.cpp")
-      (synopsis "Port of Facebook's LLaMA model in C/C++")
-      (description "This package provides a port to Facebook's LLaMA collection
+  (package
+    (name "llama-cpp")
+    (version "1873")
+    (source
+     (origin
+       (method git-fetch)
+       (uri (git-reference
+             (url "https://github.com/ggerganov/llama.cpp")
+             (commit (string-append "b" version))))
+       (file-name (git-file-name name version))
+       (sha256
+        (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+    (build-system cmake-build-system)
+    (arguments
+     (list
+      #:modules '((ice-9 textual-ports)
+                  (guix build utils)
+                  ((guix build python-build-system) #:prefix python:)
+                  (guix build cmake-build-system))
+      #:imported-modules `(,@%cmake-build-system-modules
+                           (guix build python-build-system))
+      #:phases
+      #~(modify-phases %standard-phases
+          (add-before 'install 'install-python-scripts
+            (lambda _
+              (let ((bin (string-append #$output "/bin/")))
+                (define (make-script script)
+                  (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+                    (call-with-input-file
+                        (string-append "../source/" script suffix)
+                      (lambda (input)
+                        (call-with-output-file (string-append bin script)
+                          (lambda (output)
+                            (format output "#!~a/bin/python3\n~a"
+                                    #$(this-package-input "python")
+                                    (get-string-all input))))))
+                    (chmod (string-append bin script) #o555)))
+                (mkdir-p bin)
+                (make-script "convert-hf-to-gguf")
+                (make-script "convert-llama-ggml-to-gguf")
+                (make-script "convert-lora-to-ggml")
+                (make-script "convert-persimmon-to-gguf")
+                (make-script "convert"))))
+          (add-after 'install-python-scripts 'wrap-python-scripts
+            (assoc-ref python:%standard-phases 'wrap))
+          (replace 'install
+            (lambda _
+              (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+    (inputs (list python))
+    (propagated-inputs
+     (list python-numpy python-pytorch python-sentencepiece python-gguf))
+    (home-page "https://github.com/ggerganov/llama.cpp")
+    (synopsis "Port of Facebook's LLaMA model in C/C++")
+    (description "This package provides a port to Facebook's LLaMA collection
 of foundation language models.  It requires models parameters to be downloaded
 independently to be able to run a LLaMA model.")
-      (license license:expat))))
+    (license license:expat)))
 
 (define-public mcl
   (package

base-commit: c5453fbfeb0dbd19cb402199fe1e5ad51a051e56
-- 
2.41.0





      parent reply	other threads:[~2024-01-26 12:22 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-01-14 20:32 [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 David Pflug
2024-01-17 17:29 ` Mathieu Othacehe
2024-01-26 12:20 ` David Pflug [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240126122110.10991-1-david@pflug.io \
    --to=david@pflug.io \
    --cc=68455@debbugs.gnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
Code repositories for project(s) associated with this external index

	https://git.savannah.gnu.org/cgit/guix.git

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.