From: David Pflug <david@pflug.io>
To: 68455@debbugs.gnu.org
Cc: David Pflug <david@pflug.io>
Subject: [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873.
Date: Sun, 14 Jan 2024 15:32:45 -0500 [thread overview]
Message-ID: <20240114203255.26500-1-david@pflug.io> (raw)
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.
Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
gnu/packages/machine-learning.scm | 133 ++++++++++++++++++------------
1 file changed, 78 insertions(+), 55 deletions(-)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 1616738399..0cdfe7bb08 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -22,6 +22,7 @@
;;; Copyright © 2023 Navid Afkhami <navid.afkhami@mdc-berlin.de>
;;; Copyright © 2023 Zheng Junjie <873216071@qq.com>
;;; Copyright © 2023 Troy Figiel <troy@troyfigiel.com>
+;;; Copyright © 2024 David Pflug <david@pflug.io>
;;;
;;; This file is part of GNU Guix.
;;;
@@ -517,63 +518,63 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
- (revision "0"))
- (package
- (name "llama-cpp")
- (version (git-version "0.0.0" revision commit))
- (source
- (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
- (commit (string-append "master-" (string-take commit 7)))))
- (file-name (git-file-name name version))
- (sha256
- (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
- (build-system cmake-build-system)
- (arguments
- (list
- #:modules '((ice-9 textual-ports)
- (guix build utils)
- ((guix build python-build-system) #:prefix python:)
- (guix build cmake-build-system))
- #:imported-modules `(,@%cmake-build-system-modules
- (guix build python-build-system))
- #:phases
- #~(modify-phases %standard-phases
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert-pth-to-ggml")
- (make-script "convert-lora-to-ggml")
- (make-script "convert"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
- (replace 'install
- (lambda _
- (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
- (inputs (list python))
- (propagated-inputs
- (list python-numpy python-pytorch python-sentencepiece))
- (home-page "https://github.com/ggerganov/llama.cpp")
- (synopsis "Port of Facebook's LLaMA model in C/C++")
- (description "This package provides a port to Facebook's LLaMA collection
+ (package
+ (name "llama-cpp")
+ (version "1873")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/ggerganov/llama.cpp")
+ (commit (string-append "b" version))))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+ (build-system cmake-build-system)
+ (arguments
+ (list
+ #:modules '((ice-9 textual-ports)
+ (guix build utils)
+ ((guix build python-build-system) #:prefix python:)
+ (guix build cmake-build-system))
+ #:imported-modules `(,@%cmake-build-system-modules
+ (guix build python-build-system))
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-before 'install 'install-python-scripts
+ (lambda _
+ (let ((bin (string-append #$output "/bin/")))
+ (define (make-script script)
+ (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+ (call-with-input-file
+ (string-append "../source/" script suffix)
+ (lambda (input)
+ (call-with-output-file (string-append bin script)
+ (lambda (output)
+ (format output "#!~a/bin/python3\n~a"
+ #$(this-package-input "python")
+ (get-string-all input))))))
+ (chmod (string-append bin script) #o555)))
+ (mkdir-p bin)
+ (make-script "convert-hf-to-gguf")
+ (make-script "convert-llama-ggml-to-gguf")
+ (make-script "convert-lora-to-ggml")
+ (make-script "convert-persimmon-to-gguf")
+ (make-script "convert"))))
+ (add-after 'install-python-scripts 'wrap-python-scripts
+ (assoc-ref python:%standard-phases 'wrap))
+ (replace 'install
+ (lambda _
+ (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+ (inputs (list python))
+ (propagated-inputs
+ (list python-numpy python-pytorch python-sentencepiece python-gguf))
+ (home-page "https://github.com/ggerganov/llama.cpp")
+ (synopsis "Port of Facebook's LLaMA model in C/C++")
+ (description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires models parameters to be downloaded
independently to be able to run a LLaMA model.")
- (license license:expat))))
+ (license license:expat)))
(define-public mcl
(package
@@ -5257,3 +5258,25 @@ (define-public oneapi-dnnl
"OneAPI Deep Neural Network Library (oneDNN) is a cross-platform
performance library of basic building blocks for deep learning applications.")
(license license:asl2.0)))
+
+(define-public python-gguf
+ (package
+ (name "python-gguf")
+ (version "0.6.0")
+ (source
+ (origin
+ (method url-fetch)
+ (uri (pypi-uri "gguf" version))
+ (sha256
+ (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
+ (build-system pyproject-build-system)
+ (arguments
+ `(#:phases
+ (modify-phases %standard-phases
+ (delete 'check))))
+ (inputs (list poetry python-pytest))
+ (propagated-inputs (list python-numpy))
+ (home-page "https://ggml.ai")
+ (synopsis "Read and write ML models in GGUF for GGML")
+    (description "This package provides a Python library to read and write ML models in the GGUF format used by GGML.")
+ (license license:expat)))
base-commit: 18393fcdddf5c3d834fa89ebf5f3925fc5b166ed
--
2.41.0
next reply other threads:[~2024-01-14 20:34 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-14 20:32 David Pflug [this message]
2024-01-17 17:29 ` [bug#68455] [PATCH] gnu: llama-cpp: Update to 1873 Mathieu Othacehe
2024-01-26 12:20 ` [bug#68455] [PATCH v2] " David Pflug
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240114203255.26500-1-david@pflug.io \
--to=david@pflug.io \
--cc=68455@debbugs.gnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
Code repositories for project(s) associated with this external index
https://git.savannah.gnu.org/cgit/guix.git
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.